@article {4843, title = {Evolutionary and biomedical insights from a marmoset diploid genome assembly}, journal = {Nature}, year = {2021}, month = {04/2021}, abstract = {
The accurate and complete assembly of both haplotype sequences of a diploid organism is essential to understanding the role of variation in genome functions, phenotypes, and diseases1. Here, using a trio-binning approach, we present a high-quality, diploid reference genome, with both haplotypes assembled independently at the chromosome level, for the common marmoset (Callithrix jacchus), an important primate model system widely used in biomedical research2,3. The full heterozygosity spectrum between the two haplotypes involves 1.36\% of the genome, much higher than the 0.13\% indicated by the standard single nucleotide heterozygosity estimation alone. The de novo mutation rate is 0.43 {\texttimes} 10$^{-8}$ per site per generation, where the paternal inherited genome acquired twice as many mutations as the maternal. Our diploid assembly enabled us to discover a recent expansion of the sex differentiated region and unique evolutionary changes in the marmoset Y chromosome. Additionally, we identified many genes with signatures of positive selection that might have contributed to the evolution of Callithrix biological features. Brain related genes were highly conserved between marmosets and humans, though several genes experienced lineage-specific copy number variations or diversifying selection, providing important implications for the application of marmosets as a model system.
}, issn = {0028-0836}, doi = {10.1038/s41586-021-03535-x}, url = {http://www.nature.com/articles/s41586-021-03535-x}, author = {Yang, Chentao and Zhou, Yang and Marcus, Stephanie and Formenti, Giulio and Bergeron, Lucie A. and Song, Zhenzhen and Bi, Xupeng and Bergman, Juraj and Rousselle, Marjolaine Marie C. and Zhou, Chengran and Zhou, Long and Deng, Yuan and Fang, Miaoquan and Xie, Duo and Zhu, Yuanzhen and Tan, Shangjin and Mountcastle, Jacquelyn and Haase, Bettina and Balacco, Jennifer and Wood, Jonathan and Chow, William and Rhie, Arang and Pippel, Martin and Fabiszak, Margaret M. and Koren, Sergey and Fedrigo, Olivier and W. A. Freiwald and Howe, Kerstin and Yang, Huanming and Phillippy, Adam M. and Schierup, Mikkel Heide and Jarvis, Erich D. and Zhang, Guojie} } @article {4869, title = {A fast link between face perception and memory in the temporal pole}, journal = {Science}, year = {2021}, month = {07/2021}, pages = {eabi6671}, abstract = {The question of how the brain recognizes the faces of familiar individuals has been important throughout the history of neuroscience. Cells linking visual processing to person memory have been proposed, but not found. Here we report the discovery of such cells through recordings from an fMRI-identified area in the macaque temporal pole. These cells responded to faces when they were personally familiar. They responded non-linearly to step-wise changes in face visibility and detail, and holistically to face parts, reflecting key signatures of familiar face recognition. They discriminated between familiar identities, as fast as a general face identity area. The discovery of these cells establishes a new pathway for the fast recognition of familiar individuals.
}, issn = {0036-8075}, doi = {10.1126/science.abi6671}, url = {https://www.sciencemag.org/lookup/doi/10.1126/science.abi6671}, author = {Landi, Sofia M. and Viswanathan, Pooja and Serene, Stephen and W. A. Freiwald} } @article {4831, title = {The human endogenous attentional control network includes a ventro-temporal cortical node}, journal = {Nature Communications}, volume = {12}, year = {2021}, month = {02/2021}, abstract = {Endogenous attention is the cognitive function that selects the relevant pieces of sensory information to achieve goals and it is known to be controlled by dorsal fronto-parietal brain areas. Here we expand this notion by identifying a control attention area located in the temporal lobe. By combining a demanding behavioral paradigm with functional neuroimaging and diffusion tractography, we show that like fronto-parietal attentional areas, the human posterior inferotemporal cortex exhibits significant attentional modulatory activity. This area is functionally distinct from surrounding cortical areas, and is directly connected to parietal and frontal attentional regions. These results show that attentional control spans three cortical lobes and overarches large distances through fiber pathways that run orthogonally to the dominant anterior-posterior axes of sensory processing, thus suggesting a different organizing principle for cognitive control.
}, doi = {10.1038/s41467-020-20583-5}, url = {http://www.nature.com/articles/s41467-020-20583-5}, author = {Sani, Ilaria and Stemmann, Heiko and Caron, Bradley and Bullock, Daniel and Stemmler, Torsten and Fahle, Manfred and Pestilli, Franco and W. A. Freiwald} } @article {5075, title = {Joint encoding of facial identity, orientation, gaze, and expression in the middle dorsal face area}, journal = {Proceedings of the National Academy of Sciences}, volume = {118}, year = {2021}, month = {04/2021}, abstract = {The last two decades have established that a network of face-selective areas in the temporal lobe of macaque monkeys supports the visual processing of faces. Each area within the network contains a large fraction of face-selective cells. And each area encodes facial identity and head orientation differently. A recent brain-imaging study discovered an area outside of this network selective for naturalistic facial motion, the middle dorsal (MD) face area. This finding offers the opportunity to determine whether coding principles revealed inside the core network would generalize to face areas outside the core network. We investigated the encoding of static faces and objects, facial identity, and head orientation, dimensions which had been studied in multiple areas of the core face-processing network before, as well as facial expressions and gaze. We found that MD populations form a face-selective cluster with a degree of selectivity comparable to that of areas in the core face-processing network. MD encodes facial identity robustly across changes in head orientation and expression, it encodes head orientation robustly against changes in identity and expression, and it encodes expression robustly across changes in identity and head orientation. These three dimensions are encoded in a separable manner. Furthermore, MD also encodes the direction of gaze in addition to head orientation. 
Thus, MD encodes both structural properties (identity) and changeable ones (expression and gaze) and thus provides information about another animal{\textquoteright}s direction of attention (head orientation and gaze). MD contains a heterogeneous population of cells that establish a multidimensional code for faces.
}, issn = {0027-8424}, doi = {10.1073/pnas.2108283118}, url = {https://pnas.org/doi/full/10.1073/pnas.2108283118}, author = {Yang, Zetian and W. A. Freiwald} } @article {4828, title = {Neuroscience: A Face{\textquoteright}s Journey through Space and Time}, journal = {Current Biology}, volume = {31}, year = {2021}, month = {01/2021}, pages = {R13--R15}, abstract = {Faces are complex objects of great variety, which the visual brain somehow manages to organize by similarity. Two such orderings in fact exist and one, a new study finds, is transformed into the other over time, enhancing a face{\textquoteright}s distinctiveness.
}, issn = {0960-9822}, doi = {10.1016/j.cub.2020.10.065}, url = {https://linkinghub.elsevier.com/retrieve/pii/S0960982220316080}, author = {W. A. Freiwald and Hosoya, Haruo} } @article {4448, title = {Efficient inverse graphics in biological face processing}, journal = {Science Advances}, volume = {6}, year = {2020}, month = {03/2020}, pages = {eaax5979}, abstract = {Vision not only detects and recognizes objects, but performs rich inferences about the underlying scene structure that causes the patterns of light we see. Inverting generative models, or {\textquotedblleft}analysis-by-synthesis{\textquotedblright}, presents a possible solution, but its mechanistic implementations have typically been too slow for online perception, and their mapping to neural circuits remains unclear. Here we present a neurally plausible efficient inverse graphics model and test it in the domain of face recognition. The model is based on a deep neural network that learns to invert a three-dimensional face graphics program in a single fast feedforward pass. It explains human behavior qualitatively and quantitatively, including the classic {\textquotedblleft}hollow face{\textquotedblright} illusion, and it maps directly onto a specialized face-processing circuit in the primate brain. The model fits both behavioral and neural data better than state-of-the-art computer vision models, and suggests an interpretable reverse-engineering account of how the brain transforms images into percepts.
}, doi = {10.1126/sciadv.aax5979}, url = {https://advances.sciencemag.org/lookup/doi/10.1126/sciadv.aax5979}, author = {Ilker Yildirim and Mario Belledonne and W. A. Freiwald and Joshua B. Tenenbaum} } @article {4805, title = {Face selective patches in marmoset frontal cortex}, journal = {Nature Communications}, volume = {11}, year = {2020}, month = {12/2020}, abstract = {In humans and macaque monkeys, socially relevant face processing is accomplished via a distributed functional network that includes specialized patches in frontal cortex. It is unclear whether a similar network exists in New World primates, who diverged {\textasciitilde}35 million years from Old World primates. The common marmoset is a New World primate species ideally placed to address this question given their complex social repertoire. Here, we demonstrate the existence of a putative high-level face processing network in marmosets. Like Old World primates, marmosets show differential activation in anterior cingulate and lateral prefrontal cortices while they view socially relevant videos of marmoset faces. We corroborate the locations of these frontal regions by demonstrating functional and structural connectivity between these regions and temporal lobe face patches. Given the evolutionary separation between macaques and marmosets, our results suggest this frontal network specialized for social face processing predates the separation between Platyrrhini and Catarrhini.
}, doi = {10.1038/s41467-020-18692-2}, url = {http://www.nature.com/articles/s41467-020-18692-2}, author = {Schaeffer, David J. and Selvanayagam, Janahan and Johnston, Kevin D. and Menon, Ravi S. and W. A. Freiwald and Everling, Stefan} } @article {4806, title = {Gross means Great}, journal = {Progress in Neurobiology}, volume = {195}, year = {2020}, month = {12/2020}, pages = {101924}, issn = {0301-0082}, doi = {10.1016/j.pneurobio.2020.101924}, url = {https://linkinghub.elsevier.com/retrieve/pii/S0301008220301799}, author = {W. A. Freiwald} } @article {4504, title = {The neural mechanisms of face processing: cells, areas, networks, and models}, journal = {Current Opinion in Neurobiology}, volume = {60}, year = {2020}, month = {02/2020}, pages = {184--191}, abstract = {Since its discovery, the face-processing network in the brain of the macaque monkey has emerged as a model system that allowed for major neural mechanisms of face recognition to be identified {\textendash} with implications for object recognition at large. Populations of face cells encode faces through broad tuning curves, whose shapes change over time. Face representations differ qualitatively across faces areas, and we not only understand the global organization of these specializations, but also some of the transformations between face areas, both feed-forward and feed-back, and the computational principles behind face representations and transformations. Facial information is combined with physical features and mnemonic features in extensions of the core network, which forms an early part of the primate social brain.
}, issn = {0959-4388}, doi = {10.1016/j.conb.2019.12.007}, url = {https://linkinghub.elsevier.com/retrieve/pii/S0959438819301424}, author = {W. A. Freiwald} } @article {4816, title = {Social interaction networks in the primate brain}, journal = {Current Opinion in Neurobiology}, volume = {65}, year = {2020}, month = {12/2020}, pages = {49--58}, abstract = {Primate brains have evolved to understand and engage with their social world. Much about the structure of this world can be gleaned from social interactions. Circuits for the analysis of and participation in social interactions have now been mapped. Increased knowledge about their functional specializations and relative spatial locations promises to greatly improve the understanding of the functional organization of the primate social brain. Detailed electrophysiology, as in the case of the face-processing network, of local operations and functional interactions between areas is necessary to uncover neural mechanisms and computation principles of social cognition. New naturalistic behavioral paradigms, behavioral tracking, and new analytical approaches for parallel non-stationary data will be important components toward a neuroscientific theory of primates{\textquoteright} interactive minds.
}, issn = {0959-4388}, doi = {10.1016/j.conb.2020.08.012}, url = {https://linkinghub.elsevier.com/retrieve/pii/S0959438820301252}, author = {W. A. Freiwald} } @conference {4546, title = {Analysis of Macaque Monkeys{\textquoteright} Social and Physical Interaction Processing with Eye tracking Data}, booktitle = {The Rockefeller University 2019 Summer Science Research Program (SSRP)}, year = {2019}, month = {08/2019}, address = {New York, NY, USA}, author = {Yutong Zhang and Marciniak, Karolina and W. A. Freiwald} } @article {4503, title = {Evidence for an attentional priority map in inferotemporal cortex}, journal = {Proceedings of the National Academy of Sciences}, volume = {116}, year = {2019}, month = {11/2019}, pages = {23797--23805}, abstract = {From incoming sensory information, our brains make selections according to current behavioral goals. This process, selective attention, is controlled by parietal and frontal areas. Here, we show that another brain area, posterior inferotemporal cortex (PITd), also exhibits the defining properties of attentional control. We discovered this area with functional magnetic resonance imaging (fMRI) during an attentive motion discrimination task. Single-cell recordings from PITd revealed strong attentional modulation across 3 attention tasks yet no tuning to task-relevant stimulus features, like motion direction or color. Instead, PITd neurons closely tracked the subject{\textquoteright}s attention state and predicted upcoming errors of attentional selection. Furthermore, artificial electrical PITd stimulation controlled the location of attentional selection without altering feature discrimination. These are the defining properties of a feature-blind priority map encoding the locus of attention. Together, these results suggest area PITd, located strategically to gather information about object properties, as an attentional priority map.
}, issn = {0027-8424}, doi = {10.1073/pnas.1821866116}, url = {http://www.pnas.org/lookup/doi/10.1073/pnas.1821866116}, author = {Stemmann, Heiko and W. A. Freiwald} } @conference {4543, title = {Facial Expression Scoring and Assessment of Facial Movement Kinematics in Non-Human Primates}, booktitle = {The Rockefeller University 2019 Summer Science Research Program (SSRP)}, year = {2019}, month = {08/2019}, address = {New York, NY, USA}, author = {Obiajulu, D. and Yuriria Vazquez and G.A. Ianni and Yazdani, F. and W. A. Freiwald} } @conference {4535, title = {Neural mechanisms supporting facial expressions }, booktitle = {unknown}, year = {2019}, month = {04/2019}, address = {New York, NY, USA}, author = {Yuriria Vazquez and Geena Ianni and W. A. Freiwald} } @conference {4544, title = {Pupillary responses track changes in arousal and attention while exploring a virtual reality environment}, booktitle = {The Rockefeller University 2019 Summer Undergraduate Research Fellowship (SURF) Program}, year = {2019}, month = {08/2019}, address = {New York, NY, USA}, author = {Otero Coronel, Santiago and Phillips-Jones, Taylor and Sani, Ilaria and W. A. Freiwald} } @conference {4545, title = {A Virtual Reality Experimental Approach for Studying How the Brain Implements Attentive Behaviors}, booktitle = {Tri-Institute 2019 Gateways to the Laboratory Summer Program}, year = {2019}, month = {08/2019}, address = {New York, NY, USA}, author = {Phillips-Jones, Taylor and Otero Coronel, Santiago and Sani, Ilaria and W. A. Freiwald} } @conference {4536, title = {Visual Features for Invariant Coding by Face Selective Neurons }, booktitle = {2019 Conference on Cognitive Computational Neuroscience (CCN)}, year = {2019}, month = {09/2019}, address = {Berlin, Germany}, author = {Zarco, Wilbert and W. A. 
Freiwald} } @article {3536, title = {Comparing human and monkey neural circuits for processing social scenes}, year = {2018}, month = {08/2018}, address = {Seattle, WA}, url = {http://www.cnsorg.org/cns-2018}, author = {Julia Sliwa and S.R. Marvel and G.A. Ianni and W. A. Freiwald} } @article {3535, title = {Comparing human and monkey neural circuits for processing social scenes}, year = {2018}, month = {05/2018}, address = {Brooklyn, NY }, url = {http://www.socialaffectiveneuro.org/conferences.html}, author = {Julia Sliwa and S.R. Marvel and G.A. Ianni and W. A. Freiwald} } @article {4148, title = {Comparing human and monkey neural circuits for processing social scenes}, year = {2018}, month = {11/2017}, author = {Julia Sliwa and S. R. Marvel and G.A. Ianni and W. A. Freiwald} } @article {4149, title = {Comparing human and monkey neural circuits for processing social scenes}, year = {2018}, author = {Julia Sliwa and S. R. Marvel and G.A. Ianni and W. A. Freiwald} } @article {3540, title = {Efficient inverse graphics in biological face processing}, year = {2018}, month = {04/02/2018}, abstract = {The visual system must not only recognize and localize objects, but perform much richer inferences about the underlying causes in the world that give rise to observed sense data. Analyzing scenes by inverting causal generative models, also known as "analysis-by-synthesis", has a long history in computational vision, and these models have some behavioral support, but they are typically too slow to support online perception and have no known mapping to actual neural circuits. Here we present a neurally plausible model for efficiently inverting generative models of images and test it as a precise account of one aspect of high-level vision, the perception of faces. The model is based on a deep neural network that learns to invert a three-dimensional (3D) face graphics program in a single fast feedforward pass. 
It successfully explains both human behavioral data and multiple levels of neural processing in non-human primates, as well as a classic illusion, the "hollow face" effect. The model also fits qualitatively better than state-of-the-art computer vision models, and suggests an interpretable reverse-engineering account of how images are transformed into scene percepts in the primate ventral stream.
}, url = {https://www.biorxiv.org/content/early/2018/04/02/282798}, author = {Ilker Yildirim and W. A. Freiwald and Joshua B. Tenenbaum} } @conference {2822, title = {Causal and compositional generative models in online perception}, booktitle = {39th Annual Conference of the Cognitive Science Society}, year = {2017}, address = {London, UK}, abstract = {From a quick glance or the touch of an object, our brains map sensory signals to scenes composed of rich and detailed shapes and surfaces. Unlike the standard pattern recognition approaches to perception, we argue that this mapping draws on internal causal and compositional models of the outside physical world, and that such internal models underlie the generalization capacity of human perception. Here, we present a generative model of visual and multisensory perception in which the latent variables encode intrinsic properties of objects such as their shapes and surfaces in addition to their extrinsic properties such as pose and occlusion. These latent variables can be composed in novel ways and are inputs to sensory-specific causal models that output sense-specific signals. We present a novel recognition network that performs efficient inference in the generative model, computing at a speed similar to online perception. We show that our model, but not an alternative baseline model or a lesion of our model, can account for human performance in an occluded face matching task and in a cross-modal visual-to-haptic face matching task.\
From a quick glance or the touch of an object, our brains map sensory signals to scenes composed of rich and detailed shapes and surfaces. Unlike the standard approaches to perception, we argue that this mapping draws on internal causal and compositional models of the physical world and these internal models underlie the generalization capacity of human perception. Here, we present a generative model of visual and multisensory perception in which the latent variables encode intrinsic (e.g., shape) and extrinsic (e.g., occlusion) object properties. Latent variables are inputs to causal models that output sense-specific signals. We present a recognition network that performs efficient inference in the generative model, computing at a speed similar to online perception. We show that our model, but not alternatives, can account for human performance in an occluded face matching task and in a visual-to-haptic face matching task.
}, url = {https://mindmodeling.org/cogsci2017/papers/0266/index.html}, author = {Ilker Yildirim and Michael Janner and Mario Belledonne and Christian Wallraven and W. A. Freiwald and Joshua B. Tenenbaum} } @article {2639, title = {A Causal Relationship Between Face-Patch Activity and Face-Detection Behavior}, journal = {eLife}, year = {2017}, month = {04/2017}, abstract = {The primate brain contains distinct areas densely populated by face-selective neurons. One of these, face-patch ML, contains neurons selective for contrast relationships between face parts. Such contrast-relationships can serve as powerful heuristics for face detection. However, it is unknown whether neurons with such selectivity actually support face-detection behavior. Here, we devised a naturalistic face-detection task and combined it with fMRI-guided pharmacological inactivation of ML to test whether ML is of critical importance for real-world face detection. We found that inactivation of ML impairs face detection. The effect was anatomically specific, as inactivation of areas outside ML did not affect face detection, and it was categorically specific, as inactivation of ML impaired face detection while sparing body and object detection. These results establish that ML function is crucial for detection of faces in natural scenes, performing a critical first step on which other face processing operations can build.
}, keywords = {face patch, fMRI, inactivation, Neuroscience}, doi = {10.7554/eLife.18558.001}, url = {https://elifesciences.org/articles/18558}, author = {Srivatsun Sadagopan and Wilbert Zarco and W. A. Freiwald} } @article {3537, title = {Comparing human and monkey neural circuits for processing social scenes}, year = {2017}, author = {Julia Sliwa and S. R. Marvel and W. A. Freiwald} } @article {2863, title = {A Dedicated Network for Social Interaction Processing in the Primate Brain}, journal = {Science}, volume = {356}, year = {2017}, month = {05/2017}, pages = {745--749}, abstract = {Primate cognition requires interaction processing.\ Interactions can reveal otherwise hidden properties of intentional agents, such as thoughts and feelings, and of inanimate objects, such as mass and material.\ Where and how interaction analyses are implemented in the brain, is unknown.\ Using whole-brain fMRI in macaque monkeys, we discovered a network centered in medial and ventrolateral prefrontal cortex, engaged in social interaction analysis exclusively.\ Exclusivity of specialization was found for no other function anywhere in the brain.\ Two additional networks, a parieto-premotor and a temporal one, exhibited both social and physical interaction preference, which, in the temporal lobe, mapped onto a fine-grain pattern of object, body, and face selectivity.\ Extent and location of a dedicated system for social interaction analysis suggest this function as an evolutionary forerunner of human mind-reading capabilities.
}, doi = {10.1126/science.aam6383}, url = {http://science.sciencemag.org/content/356/6339/745}, author = {J. Sliwa and W. A. Freiwald} } @article {2614, title = {From agents to actions to interactions: Uncovering multiple social networks in the primate brain}, year = {2017}, author = {J. Sliwa and W. A. Freiwald} } @article {3538, title = {A Network for Social interaction understanding in the primate brain}, year = {2017}, address = {Vancouver, Canada}, author = {Julia Sliwa and W. A. Freiwald} } @article {3935, title = {Two areas for familiar face recognition in the primate brain}, journal = {Science}, volume = {357}, year = {2017}, month = {08/2017}, pages = {591--595}, chapter = {591}, abstract = {Familiarity alters face recognition: Familiar faces are recognized more accurately than unfamiliar ones and under difficult viewing conditions when unfamiliar face recognition fails. The neural basis for this fundamental difference remains unknown. Using whole-brain functional magnetic resonance imaging, we found that personally familiar faces engage the macaque face-processing network more than unfamiliar faces. Familiar faces also recruited two hitherto unknown face areas at anatomically conserved locations within the perirhinal cortex and the temporal pole. These two areas, but not the core face-processing network, responded to familiar faces emerging from a blur with a characteristic nonlinear surge, akin to the abruptness of familiar face recognition. In contrast, responses to unfamiliar faces and objects remained linear. Thus, two temporal lobe areas extend the core face-processing network into a familiar face-recognition system.
}, issn = {0036-8075}, doi = {10.1126/science.aan1139}, url = {http://www.sciencemag.org/lookup/doi/10.1126/science.aan1139}, author = {Landi, Sofia M. and W. A. Freiwald} } @article {2327, title = {View-Tolerant Face Recognition and Hebbian Learning Imply Mirror-Symmetric Neural Tuning to Head Orientation}, journal = {Current Biology}, volume = {27}, year = {2017}, month = {01/2017}, pages = {1--6}, abstract = {The primate brain contains a hierarchy of visual areas, dubbed the ventral stream, which rapidly computes object representations that are both specific for object identity and robust against identity-preserving transformations, like depth rotations. Current computational models of object recognition, including recent deep-learning networks, generate these properties through a hierarchy of alternating selectivity-increasing filtering and tolerance-increasing pooling operations, similar to simple-complex cells operations. Here, we prove that a class of hierarchical architectures and a broad set of biologically plausible learning rules generate approximate invariance to identity-preserving transformations at the top level of the processing hierarchy. However, all past models tested failed to reproduce the most salient property of an intermediate representation of a three-level face-processing hierarchy in the brain: mirror-symmetric tuning to head orientation. Here, we demonstrate that one specific biologically plausible Hebb-type learning rule generates mirror-symmetric tuning to bilaterally symmetric stimuli, like faces, at intermediate levels of the architecture and show why it does so. Thus, the tuning properties of individual cells inside the visual stream appear to result from group properties of the stimuli they encode and to reflect the learning rules that sculpted the information-processing system within which they reside.\
}, doi = {10.1016/j.cub.2016.10.015}, author = {JZ. Leibo and Qianli Liao and F. Anselmi and W. A. Freiwald and Tomaso Poggio} } @article {2612, title = {From agents to actions to interactions: Uncovering multiple social networks in the primate brain}, year = {2016}, abstract = {Our brain continuously decodes the complex visual scenes unwinding in front of us: both the nature of material entities we perceive, such as objects and individuals, and their immaterial interactions. Interactions are recognized quickly and effortlessly by primates: They understand fights, grooming and plays, but also colliding objects that exchange forces following physical laws of classical mechanics. Interactions are fundamental in that they reveal hidden properties of objects, e.g. their weight or material, and of individuals, e.g. their dominance status or relationship, and by doing so they determine and teach the observer about its own position and prospects regarding those entities. However little is known about the brain regions that track and process social and physical interactions. In order to chart these regions, videos of three types of interactions 1) social interactions between monkeys, 2) interactions between monkeys and objects or their environment and 3) physical interactions between objects, were projected to four rhesus monkeys being scanned for fMRI acquisition with contrast agent. Whole-brain activity for watching blocks of interactions was compared to the activity for watching control videos of monkeys making no actions, objects moving with no interactions, landscapes and scrambled motion videos using Fixed Effects (FFX) Generalized Linear Model (GLM) group analysis and conjunction analyses. 
We show that watching interactions over-activates the STS, but engages also two sets of regions located outside: 1) it activates the fronto-parietal mirror neuron system (mapped independently using a classic localizer) more than watching non-interactive goal directed behaviors that define the system; 2) in the case of social interactions, it additionally exclusively activates the medial-prefrontal cortex (mPFC), a putative temporo-parietal junction homolog and the temporal pole (TP) that appear to correspond to the human mentalizing network. These two networks are fed differentially by patches of STS cortex (mapped independently using a classic Face-Object-Body patch localizer): face patches co-activate with the social brain, while body patches co-activate with both the mirror neuron system and the social brain. These results demonstrate that combining individuals or objects into evocative units modulates basic mechanisms of object and individual perception in the STS, they reveal the mirror neuron system{\textquoteright}s nature of node of convergence between the social and non-social brain, and suggest that human unique and sophisticated mind-reading ability evolved from the faculty shared with our monkey kin to read social interactions.
}, author = {J. Sliwa and W. A. Freiwald} } @article {2122, title = {View-tolerant face recognition and Hebbian learning imply mirror-symmetric neural tuning to head orientation}, year = {2016}, month = {06/2016}, abstract = {The primate brain contains a hierarchy of visual areas, dubbed the ventral stream, which rapidly computes object representations that are both specific for object identity and relatively robust against identity-preserving transformations like depth-rotations [ 33 , 32 , 23 , 13 ]. Current computational models of object recognition, including recent deep learning networks, generate these properties through a hierarchy of alternating selectivity-increasing filtering and tolerance-increasing pooling operations, similar to simple-complex cells operations [ 46 , 8 , 44 , 29 ]. While simulations of these models recapitulate the ventral stream{\textquoteright}s progression from early view-specific to late view-tolerant representations, they fail to generate the most salient property of the intermediate representation for faces found in the brain: mirror-symmetric tuning of the neural population to head orientation [ 16 ]. Here we prove that a class of hierarchical architectures and a broad set of biologically plausible learning rules can provide approximate invariance at the top level of the network. While most of the learning rules do not yield mirror-symmetry in the mid-level representations, we characterize a specific biologically-plausible Hebb-type learning rule that is guaranteed to generate mirror-symmetric tuning to faces tuning at intermediate levels of the architecture.
}, author = {J. Z. Leibo and Qianli Liao and W. A. Freiwald and F. Anselmi and Tomaso Poggio} } @article {1007, title = {Contrasting Specializations for Facial Motion within the Macaque Face-Processing System}, journal = {Current Biology}, volume = {25}, year = {2015}, type = {Report}, chapter = {261}, abstract = {Facial motion transmits rich and ethologically vital information
[1, 2], but how the brain interprets this complex signal is
poorly understood. Facial form is analyzed by anatomically
distinct face patches in the macaque brain [3, 4], and facial
motion activates these patches and surrounding areas [5,
6]. Yet, it is not known whether facial motion is processed
by its own distinct and specialized neural machinery, and if
so, what that machinery{\textquoteright}s organization might be. To address
these questions, we used fMRI to monitor the brain activity
of macaque monkeys while they viewed low- and high-level
motion and form stimuli. We found that, beyond classical
motion areas and the known face patch system, moving
faces recruited a heretofore unrecognized face patch.
Although all face patches displayed distinctive selectivity
for face motion over object motion, only two face patches
preferred naturally moving faces, while three others
preferred randomized, rapidly varying sequences of facial
form. This functional divide was anatomically specific,
segregating dorsal from ventral face patches, thereby
revealing a new organizational principle of the macaque
face-processing system.
}, doi = {10.1016/j.cub.2014.11.013}, author = {Clark Fisher and W. A. Freiwald} } @article {1007b, title = {Face Patch Resting State Networks Link Face Processing to Social Cognition}, journal = {PLoS Biology}, volume = {13}, year = {2015}, abstract = {Faces transmit a wealth of social information. How this information is exchanged between face-processing centers and brain areas supporting social cognition remains largely unclear. Here we identify these routes using resting state functional magnetic resonance imaging in macaque monkeys. We find that face areas functionally connect to specific regions within frontal, temporal, and parietal cortices, as well as subcortical structures supporting emotive, mnemonic, and cognitive functions. This establishes the existence of an extended face-recognition system in the macaque. Furthermore, the face patch resting state networks and the default mode network in monkeys show a pattern of overlap akin to that between the social brain and the default mode network in humans: this overlap specifically includes the posterior superior temporal sulcus, medial parietal, and dorsomedial prefrontal cortex, areas supporting high-level social cognition in humans. Together, these results reveal the embedding of face areas into larger brain networks and suggest that the resting state networks of the face patch system offer a new, easily accessible venue into the functional organization of the social brain and into the evolution of possibly uniquely human social skills.
}, keywords = {Face recognition, neural networks, prefrontal cortex, social cognition}, issn = {1545-7885}, doi = {10.1371/journal.pbio.1002245}, url = {http://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.1002245}, author = {Schwiedrzik, Caspar M and Wilbert Zarco and Everling, Stefan and W. A. Freiwald} } @article {1356, title = {Intelligent Information Loss: The Coding of Facial Identity, Head Pose, and Non-Face Information in the Macaque Face Patch System}, journal = {The Journal of Neuroscience}, volume = {35}, year = {2015}, month = {05/2015}, chapter = {7069}, abstract = {Faces are a behaviorally important class of visual stimuli for primates. Recent work in macaque monkeys has identified six discrete face areas where most neurons have higher firing rates to images of faces compared with other objects (Tsao et al., 2006). While neurons in these areas appear to have different tuning (Freiwald and Tsao, 2010; Issa and DiCarlo, 2012), exactly what types of information and, consequently, which visual behaviors neural populations within each face area can support, is unknown. Here we use population decoding to better characterize three of these face patches (ML/MF, AL, and AM). We show that neural activity in all patches contains information that discriminates between the broad categories of face and nonface objects, individual faces, and nonface stimuli. Information is present in both high and lower firing rate regimes. However, there were significant differences between the patches, with the most anterior patch showing relatively weaker representation of nonface stimuli. Additionally, we find that pose-invariant face identity information increases as one moves to more anterior patches, while information about the orientation of the head decreases.
Finally, we show that all the information we can extract from the population is present in patterns of activity across neurons, and there is relatively little information in the total activity of the population. These findings give new insight into the representations constructed by the face patch system and how they are successively transformed.
}, doi = {10.1523/JNEUROSCI.3086-14.2015}, url = {http://www.ncbi.nlm.nih.gov/pubmed/25948258}, author = {Ethan Meyers and Mia Borzello and W. A. Freiwald and Doris Tsao} } @article {1466, title = {Whole-agent selectivity within the macaque face-processing system}, journal = {Proceedings of the National Academy of Sciences (PNAS)}, volume = {112}, year = {2015}, month = {10/2015}, chapter = {14717}, abstract = {The primate brain contains a set of face-selective areas, which are thought to extract the rich social information that faces provide, such as emotional state and personal identity. The nature of this information raises a fundamental question about these face-selective areas: Do they respond to a face purely because of its visual attributes, or because the face embodies a larger social agent? Here, we used functional magnetic resonance imaging to determine whether the macaque face patch system exhibits a whole-agent response above and beyond its responses to individually presented faces and bodies. We found a systematic development of whole-agent preference through the face patches, from subadditive integration of face and body responses in posterior face patches to superadditive integration in anterior face patches. Superadditivity was not observed for faces atop nonbody objects, implying categorical specificity of face{\textendash}body interaction. Furthermore, superadditivity was robust to visual degradation of facial detail, suggesting whole-agent selectivity does not require prior face recognition. In contrast, even the body patches immediately adjacent to anterior face areas did not exhibit superadditivity. This asymmetry between face- and body-processing systems may explain why observers attribute bodies{\textquoteright} social signals to faces, and not vice versa. 
The development of whole-agent selectivity from posterior to anterior face patches, in concert with the recently described development of natural motion selectivity from ventral to dorsal face patches, identifies a single face patch, AF (anterior fundus), as a likely link between the analysis of facial shape and semantic inferences about other agents.
}, issn = {0027-8424}, doi = {10.1073/pnas.1512378112}, url = {http://www.pnas.org/content/112/47/14717.abstract}, author = {Clark Fisher and W. A. Freiwald} } @article {1037, title = {Explaining Monkey Face Patch System as Efficient Analysis-by-Synthesis}, year = {2014}, author = {Ilker Yildirim and Tejas Kulkarni and W. A. Freiwald and Joshua B. Tenenbaum} }