@article{deniz_robustness_2020,
  title = {Robustness to adversarial examples can be improved with overfitting},
  volume = {11},
  issn = {1868-808X},
  url = {https://doi.org/10.1007/s13042-020-01097-4},
  doi = {10.1007/s13042-020-01097-4},
  abstract = {Deep learning (henceforth DL) has become the most powerful machine learning methodology. Under specific circumstances, recognition rates even surpass those obtained by humans. Despite this, several works have shown that deep learning produces outputs that are very far from human responses when confronted with the same task. This is the case of the so-called “adversarial examples” (henceforth AE). The fact that such implausible misclassifications exist points to a fundamental difference between machine and human learning. This paper focuses on the possible causes of this intriguing phenomenon. We first argue that the error in adversarial examples is caused by high bias, i.e. by regularization that has local negative effects. This idea is supported by our experiments, in which robustness to adversarial examples is measured with respect to the level of fitting to the training samples. Higher fitting was associated with higher robustness to adversarial examples. This ties the phenomenon to the trade-off that exists in machine learning between fitting and generalization.},
  language = {en},
  number = {4},
  urldate = {2020-04-10},
  journal = {International Journal of Machine Learning and Cybernetics},
  author = {Deniz, Oscar and Pedraza, Anibal and Vallez, Noelia and Salido, Jesus and Bueno, Gloria},
  month = apr,
  year = {2020},
  pages = {935--944},
}