@inproceedings{1afd1cf9ef6547db9c1d41df8d57390a,
title = "ESPN: Extremely sparse pruned networks",
abstract = "Deep neural networks are often highly over-parameterized, prohibiting their use in compute-limited systems. However, a line of recent works has shown that the size of deep networks can be considerably reduced by identifying a subset of neuron indicators (or mask) that correspond to significant weights prior to training. We demonstrate that a simple iterative mask discovery method can achieve state-of-the-art compression of very deep networks. Our algorithm represents a hybrid approach between single-shot network pruning methods (such as SNIP) with Lottery-Ticket type approaches. We validate our approach on several datasets and outperform several existing pruning approaches in both test accuracy and compression ratio.",
keywords = "Model compression, Neural network pruning, Sparsification",
author = "Minsu Cho and Ameya Joshi and Chinmay Hegde",
note = "Funding Information: The authors are with the Tandon School of Engineering at New York University. The authors thank the NYU HPC team for help with the computing infrastructure, and Anna Choromanska for useful discussions. This work was supported in part by NSF grants CCF-2005804 and CCF-1815101, and DOE grants DE-EE0009105 and DE-AR0001215. Publisher Copyright: {\textcopyright} 2021 IEEE.; 2021 IEEE Data Science and Learning Workshop, DSLW 2021 ; Conference date: 05-06-2021 Through 06-06-2021",
year = "2021",
month = jun,
day = "5",
doi = "10.1109/DSLW51110.2021.9523404",
language = "English (US)",
series = "2021 IEEE Data Science and Learning Workshop, DSLW 2021",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
booktitle = "2021 IEEE Data Science and Learning Workshop, DSLW 2021",
}
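
For intuition, below is a minimal, hypothetical sketch of the kind of iterative mask-discovery loop the abstract describes: SNIP-style connection-sensitivity scores |g * w| select which weights to keep, surviving weights are rewound to their initial values in the Lottery-Ticket manner, and the cycle repeats while the sparsity target ramps up. All function names, the linear sparsity schedule, and the hyperparameters are illustrative assumptions, not the authors' implementation.

# Hypothetical sketch (not the authors' code): an iterative mask-discovery
# loop combining SNIP-style saliency scoring with Lottery-Ticket-style
# weight rewinding. Names, schedule, and hyperparameters are assumptions.
import torch
import torch.nn as nn

def snip_scores(model, loss_fn, x, y):
    """SNIP-style saliency |g * w| for each weight tensor, from one minibatch."""
    model.zero_grad()
    loss_fn(model(x), y).backward()
    return {name: (p.grad * p).abs()
            for name, p in model.named_parameters() if p.dim() > 1}

def build_mask(scores, sparsity):
    """Binary mask keeping the globally top-scoring (1 - sparsity) fraction."""
    flat = torch.cat([s.flatten() for s in scores.values()])
    k = max(1, int((1.0 - sparsity) * flat.numel()))
    threshold = torch.topk(flat, k).values.min()
    return {name: (s >= threshold).float() for name, s in scores.items()}

def apply_mask(model, mask):
    """Zero out pruned weights in place."""
    with torch.no_grad():
        for name, p in model.named_parameters():
            if name in mask:
                p.mul_(mask[name])

def iterative_prune(model, loss_fn, loader, final_sparsity=0.99, rounds=5, steps=100):
    """Ramp sparsity over rounds: rescore, rewind, re-mask, briefly retrain."""
    init_state = {k: v.clone() for k, v in model.state_dict().items()}
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    mask = None
    for r in range(1, rounds + 1):
        sparsity = final_sparsity * r / rounds        # linear ramp toward the target
        x, y = next(iter(loader))
        mask = build_mask(snip_scores(model, loss_fn, x, y), sparsity)
        model.load_state_dict(init_state)             # rewind weights, Lottery-Ticket style
        apply_mask(model, mask)
        for step, (x, y) in enumerate(loader):        # brief retraining between rounds
            if step >= steps:
                break
            opt.zero_grad()
            loss_fn(model(x), y).backward()
            opt.step()
            apply_mask(model, mask)                   # keep pruned weights at zero
    return mask

if __name__ == "__main__":
    net = nn.Sequential(nn.Flatten(), nn.Linear(784, 300), nn.ReLU(), nn.Linear(300, 10))
    batches = [(torch.randn(64, 1, 28, 28), torch.randint(0, 10, (64,)))
               for _ in range(10)]
    mask = iterative_prune(net, nn.CrossEntropyLoss(), batches)
    kept = sum(int(m.sum()) for m in mask.values())
    total = sum(m.numel() for m in mask.values())
    print(f"kept {kept}/{total} weights ({kept / total:.1%})")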