@article {5025, title = {When and how convolutional neural networks generalize to out-of-distribution category{\textendash}viewpoint combinations}, journal = {Nature Machine Intelligence}, volume = {4}, year = {2022}, month = {02/2022}, pages = {146--153}, abstract = {

Object recognition and viewpoint estimation lie at the heart of visual understanding. Recent studies have suggested that convolutional neural networks (CNNs) fail to generalize to out-of-distribution (OOD) category{\textendash}viewpoint combinations, that is, combinations not seen during training. Here we investigate when and how such OOD generalization may be possible by evaluating CNNs trained to classify both object category and three-dimensional viewpoint on OOD combinations, and identifying the neural mechanisms that facilitate such OOD generalization. We show that increasing the number of in-distribution combinations (data diversity) substantially improves generalization to OOD combinations, even with the same amount of training data. We compare learning category and viewpoint in separate and shared network architectures, and observe starkly different trends on in-distribution and OOD combinations: while shared networks are helpful in-distribution, separate networks significantly outperform shared ones on OOD combinations. Finally, we demonstrate that such OOD generalization is facilitated by the neural mechanism of specialization, that is, the emergence of two types of neuron{\textemdash}neurons selective to category and invariant to viewpoint, and vice versa.

}, doi = {10.1038/s42256-021-00437-5}, url = {https://www.nature.com/articles/s42256-021-00437-5}, author = {Madan, Spandan and Henry, Timothy and Dozier, Jamell and Ho, Helen and Bhandari, Nishchal and Sasaki, Tomotake and Durand, Fr{\'e}do and Pfister, Hanspeter and Boix, Xavier} }