@inproceedings{1afd1cf9ef6547db9c1d41df8d57390a,
title = "ESPN: Extremely sparse pruned networks",
abstract = "Deep neural networks are often highly over-parameterized, prohibiting their use in compute-limited systems. However, a line of recent works has shown that the size of deep networks can be considerably reduced by identifying a subset of neuron indicators (or mask) that correspond to significant weights prior to training. We demonstrate that a simple iterative mask discovery method can achieve state-of-the-art compression of very deep networks. Our algorithm represents a hybrid approach between single-shot network pruning methods (such as SNIP) with Lottery-Ticket type approaches. We validate our approach on several datasets and outperform several existing pruning approaches in both test accuracy and compression ratio.",
keywords = "Model compression, Neural network pruning, Sparsification",
author = "Minsu Cho and Ameya Joshi and Chinmay Hegde",
note = "Publisher Copyright: {\textcopyright} 2021 IEEE.; 2021 IEEE Data Science and Learning Workshop, DSLW 2021 ; Conference date: 05-06-2021 Through 06-06-2021",
year = "2021",
month = jun,
day = "5",
doi = "10.1109/DSLW51110.2021.9523404",
language = "English (US)",
series = "2021 IEEE Data Science and Learning Workshop, DSLW 2021",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
booktitle = "2021 IEEE Data Science and Learning Workshop, DSLW 2021",
}