@article{crowley_closer_2019,
  title        = {A Closer Look at Structured Pruning for Neural Network Compression},
  author       = {Crowley, Elliot J. and Turner, Jack and Storkey, Amos and O'Boyle, Michael},
  journaltitle = {{arXiv}:1810.04622 [cs, stat]},
  date         = {2019-06-07},
  eprinttype   = {arxiv},
  eprint       = {1810.04622},
  url          = {http://arxiv.org/abs/1810.04622},
  urldate      = {2020-03-18},
  abstract     = {Structured pruning is a popular method for compressing a neural network: given a large trained network, one alternates between removing channel connections and fine-tuning; reducing the overall width of the network. However, the efficacy of structured pruning has largely evaded scrutiny. In this paper, we examine {ResNets} and {DenseNets} obtained through structured pruning-and-tuning and make two interesting observations: (i) reduced networks---smaller versions of the original network trained from scratch---consistently outperform pruned networks; (ii) if one takes the architecture of a pruned network and then trains it from scratch it is significantly more competitive. Furthermore, these architectures are easy to approximate: we can prune once and obtain a family of new, scalable network architectures that can simply be trained from scratch. Finally, we compare the inference speed of reduced and pruned networks on hardware, and show that reduced networks are significantly faster. Code is available at https://github.com/BayesWatch/pytorch-prunes.},
  keywords     = {Computer Science - Computer Vision and Pattern Recognition, Computer Science - Machine Learning, Statistics - Machine Learning},
}