@comment{Lotter, Kreiman & Cox (2020), Nat. Mach. Intell. 2:210-219.
  Cleaned: month macro, page-range dashes, uniform "Last, First" author form.
  Key "4499" preserved so existing \cite{4499} references still resolve.}
@article{4499,
  author   = {Lotter, William and Kreiman, Gabriel and Cox, David},
  title    = {A Neural Network Trained for Prediction Mimics Diverse Features of Biological Neurons and Perception},
  journal  = {Nature Machine Intelligence},
  volume   = {2},
  year     = {2020},
  month    = apr,
  pages    = {210--219},
  doi      = {10.1038/s42256-020-0170-9},
  url      = {http://www.nature.com/articles/s42256-020-0170-9},
  abstract = {Recent work has shown that convolutional neural networks (CNNs) trained on image recognition tasks can serve as valuable models for predicting neural responses in primate visual cortex. However, these models typically require biologically infeasible levels of labelled training data, so this similarity must at least arise via different paths. In addition, most popular CNNs are solely feedforward, lacking a notion of time and recurrence, whereas neurons in visual cortex produce complex time-varying responses, even to static inputs. Towards addressing these inconsistencies with biology, here we study the emergent properties of a recurrent generative network that is trained to predict future video frames in a self-supervised manner. Remarkably, the resulting model is able to capture a wide variety of seemingly disparate phenomena observed in visual cortex, ranging from single-unit response dynamics to complex perceptual motion illusions, even when subjected to highly impoverished stimuli. These results suggest potentially deep connections between recurrent predictive neural network models and computations in the brain, providing new leads that can enrich both fields.},
}