@article{5321,
  title    = {Out of sight, out of mind: Responses in primate ventral visual cortex track individual fixations during natural vision},
  author   = {Xiao, Will and Sharma, Saloni and Kreiman, Gabriel and Livingstone, Margaret S.},
  journal  = {bioRxiv},
  year     = {2023},
  month    = feb,
  doi      = {10.1101/2023.02.08.527666},
  url      = {https://www.biorxiv.org/content/10.1101/2023.02.08.527666v1},
  abstract = {During natural vision, primates shift their gaze several times per second with large, ballistic eye movements known as saccades. Open questions remain as to whether visual neurons retain their classical retinotopic response properties during natural vision or whether neurons integrate information across fixations and predict the consequences of impending saccades. Answers are especially wanting for vision in complex scenes relevant to natural behavior. We let 13 monkeys freely view thousands of large natural images, recorded over 883 hours of neuronal responses throughout the ventral visual pathway across 4.7 million fixations, and designed flexible analyses to reveal the spatial, temporal, and feature selectivity of the responses. Ventral visual responses followed each fixation and did not become gaze-invariant as monkeys examined an image over seconds. Computational models revealed that neuronal responses corresponded to eye-centered receptive fields. The results suggest that ventral visual cortex remains predominantly retinotopic during natural vision and does not establish a gaze-independent representation of the world.},
}

@article{4146,
  title    = {Evolving Images for Visual Neurons Using a Deep Generative Network Reveals Coding Principles and Neuronal Preferences},
  author   = {Ponce, Carlos R. and Xiao, Will and Schade, Peter F. and Hartmann, Till S. and Kreiman, Gabriel and Livingstone, Margaret S.},
  journal  = {Cell},
  volume   = {177},
  pages    = {999--1009},
  year     = {2019},
  month    = may,
  doi      = {10.1016/j.cell.2019.04.005},
  url      = {https://www.cell.com/cell/fulltext/S0092-8674(19)30391-5},
  abstract = {What specific features should visual neurons encode, given the infinity of real-world images and the limited number of neurons available to represent them? We investigated neuronal selectivity in monkey inferotemporal cortex via the vast hypothesis space of a generative deep neural network, avoiding assumptions about features or semantic categories. A genetic algorithm searched this space for stimuli that maximized neuronal firing. This led to the evolution of rich synthetic images of objects with complex combinations of shapes, colors, and textures, sometimes resembling animals or familiar people, other times revealing novel patterns that did not map to any clear semantic category. These results expand our conception of the dictionary of features encoded in the cortex, and the approach can potentially reveal the internal representations of any system whose input can be captured by a generative model.},
}

@misc{4162,
  title    = {Biologically-plausible learning algorithms can scale to large datasets},
  author   = {Xiao, Will and Chen, Honglin and Liao, Qianli and Poggio, Tomaso},
  year     = {2018},
  month    = nov,
  note     = {Preprint},
  abstract = {The backpropagation (BP) algorithm is often thought to be biologically implausible in the brain. One of the main reasons is that BP requires symmetric weight matrices in the feedforward and feedback pathways. To address this ``weight transport problem'' (Grossberg, 1987), two more biologically plausible algorithms, proposed by Liao et al. (2016) and Lillicrap et al. (2016), relax BP{\textquoteright}s weight symmetry requirements and demonstrate comparable learning capabilities to that of BP on small datasets. However, a recent study by Bartunov et al. (2018) evaluates variants of target-propagation (TP) and feedback alignment (FA) on MNIST, CIFAR, and ImageNet datasets, and finds that although many of the proposed algorithms perform well on MNIST and CIFAR, they perform significantly worse than BP on ImageNet. Here, we additionally evaluate the sign-symmetry algorithm (Liao et al., 2016), which differs from both BP and FA in that the feedback and feedforward weights share signs but not magnitudes. We examine the performance of sign-symmetry and feedback alignment on ImageNet and MS COCO datasets using different network architectures (ResNet-18 and AlexNet for ImageNet, RetinaNet for MS COCO). Surprisingly, networks trained with sign-symmetry can attain classification performance approaching that of BP-trained networks. These results complement the study by Bartunov et al. (2018), and establish a new benchmark for future biologically plausible learning algorithms on more difficult datasets and more complex architectures.},
}