@article{jastrzebski_relation_2019,
  title = {On the Relation Between the Sharpest Directions of {DNN} Loss and the {SGD} Step Length},
  author = {Jastrzębski, Stanisław and Kenton, Zachary and Ballas, Nicolas and Fischer, Asja and Bengio, Yoshua and Storkey, Amos},
  journaltitle = {Seventh International Conference on Learning Representations},
  date = {2019-04-17},
  url = {http://arxiv.org/abs/1807.05031},
  urldate = {2019-02-01},
  eprinttype = {arxiv},
  eprint = {1807.05031},
  langid = {english},
  keywords = {Computer Science - Machine Learning, Statistics - Machine Learning},
  abstract = {Recent work has identified that using a high learning rate or a small batch size for Stochastic Gradient Descent ({SGD}) based training of deep neural networks encourages finding flatter minima of the training loss towards the end of training. Moreover, measures of the flatness of minima have been shown to correlate with good generalization performance. Extending this previous work, we investigate the loss curvature through the Hessian eigenvalue spectrum in the early phase of training and find an analogous bias: even at the beginning of training, a high learning rate or small batch size influences {SGD} to visit flatter loss regions. In addition, the evolution of the largest eigenvalues appears to always follow a similar pattern, with a fast increase in the early phase, and a decrease or stabilization thereafter, where the peak value is determined by the learning rate and batch size. Finally, we find that by altering the learning rate just in the direction of the eigenvectors associated with the largest eigenvalues, {SGD} can be steered towards regions which are an order of magnitude sharper but correspond to models with similar generalization, which suggests the curvature of the endpoint found by {SGD} is not predictive of its generalization properties.},
}