@article {3398, title = {Brain-Observatory-Toolbox}, year = {2018}, month = {01/2018}, abstract = {
A MATLAB toolbox for interacting with the Allen Brain Observatory.
Software can be accessed and downloaded from the GitHub repo here - https://github.com/emeyers/Brain-Observatory-Toolbox
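For orientation, a hypothetical usage sketch follows; every class and method name in it is an assumption made purely for illustration (not the toolbox's documented API), and the repository's README describes the actual interface.
% Hypothetical sketch of the intended workflow; the names below are
% assumptions, NOT the Brain-Observatory-Toolbox's documented API.
boc      = BrainObservatoryCache();          % hypothetical: local cache of Allen Brain Observatory data
sessions = boc.getSessionManifest();         % hypothetical: list the available experimental sessions
session  = boc.getSession(sessions.id(1));   % hypothetical: download/load one session's data
traces   = session.getFluorescenceTraces();  % hypothetical: extract calcium fluorescence traces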
}, author = {Dylan Muir and Xinzhu Fang and Ethan Meyers} } @article {3896, title = {Dynamic population coding and its relationship to working memory}, journal = {Journal of Neurophysiology}, volume = {120}, year = {2018}, month = {10/2018}, pages = {2260-2268}, abstract = {For over 45 years, neuroscientists have conducted experiments aimed at understanding the neural basis of working memory. Early results examining individual neurons highlighted that information is stored in working memory in persistent, sustained activity, in which neurons maintain elevated firing rates over extended periods of time. However, more recent work has emphasized that information is often stored in working memory in dynamic population codes, where different neurons contain information at different periods in time. In this paper, I review findings showing that both sustained activity and dynamic codes are present in the prefrontal cortex and other regions during memory delay periods. I also review work showing that dynamic codes are capable of supporting working memory and that such dynamic codes could easily be {\textquotedblleft}read out{\textquotedblright} by downstream regions. Finally, I discuss why dynamic codes could be useful for enabling animals to solve tasks that involve working memory. Although additional work is still needed to know definitively whether dynamic coding is critical for working memory, the findings reviewed here give insight into how different codes could contribute to working memory, which should be useful for guiding future research.
}, issn = {0022-3077}, doi = {10.1152/jn.00225.2018}, url = {https://www.physiology.org/doi/10.1152/jn.00225.2018}, author = {Ethan Meyers} } @conference {2543, title = {A Data Science approach to analyzing neural data}, booktitle = {Joint Statistical Meetings}, year = {2017}, abstract = {Data Science is a field that uses computational tools to extract insight from large, noisy data sets. While Data Science borrows heavily from Statistics (and one could reasonably argue that they are the same field), the culture, approach, and tools used by Data Scientists often differ from those that are more commonly used by Statisticians (Breiman, 2001; Donoho, 2015). Additionally, while Data Science approaches are most widely used in industry, scientists in academic fields usually use classical statistical approaches. In this paper, we illustrate how a Data Science approach can give useful insights into scientific questions by describing our work using machine learning methods to analyze neural data. We also outline additional ways in which Neuroscience and other fields could benefit from incorporating more Data Science perspectives into how problems are approached, and areas where Data Science approaches could benefit from more rigorous Statistical methods.
A longstanding question in neuroscience concerns the neural basis underlying working memory. Early work showed that neurons in the prefrontal cortex (PFC) hold information in working memory by having sustained firing rates for extended periods of time, while more recent work has shown that many neurons in the PFC appear to be selective for shorter periods of time and thus that information in working memory is contained in a dynamic population code (Meyers et al., 2008, 2012; Stokes et al., 2013). As more results have accumulated, it has become increasingly clear that different studies are leading to different results, with some studies showing predominantly static codes and others showing primarily dynamic codes (King and Dehaene, 2014); however, it remains unclear what is driving these different findings. One possibility is that different brain regions code information differently, and that different studies have recorded neural activity from different regions. To examine this possibility, we recorded neural activity from five different subdivisions of the PFC (posterior-dorsal, mid-dorsal, anterior-dorsal, posterior-ventral, and anterior-ventral PFC) and compared the neural coding properties of these subdivisions. A total of 1856 neurons in four monkeys trained to perform spatial and shape working memory tasks were analyzed. Our results show striking differences in how these subdivisions code information, with some subdivisions containing a completely dynamic code and others containing a completely static code. These findings give a potential explanation for discrepancies in the literature and should lead to a deeper understanding of how information is stored in working memory.
}, url = {http://www.abstractsonline.com/pp8/$\#$!/4376/presentation/4782}, author = {Ethan Meyers and Mitchell Riley and Xue-Lian Qi and Christos Constantinidis} } @article {3465, title = {Differential Processing of Isolated Object and Multi-item Pop-Out Displays in LIP and PFC.}, journal = {Cerebral Cortex}, year = {2017}, month = {10/2017}, abstract = {Objects that are highly distinct from their surroundings appear to visually "pop-out." This effect is present for displays in which: (1) a single cue object is shown on a blank background, and (2) a single cue object is highly distinct from surrounding objects; it is generally assumed that these 2 display types are processed in the same way. To directly examine this, we applied a decoding analysis to neural activity recorded from the lateral intraparietal (LIP) area and the dorsolateral prefrontal cortex (dlPFC). Our analyses showed that for the single-object displays, cue location information appeared earlier in LIP than in dlPFC. However, for the display with distractors, location information was substantially delayed in both brain regions, and information first appeared in dlPFC. Additionally, we see that the pattern of neural activity is similar for both types of displays and across different color transformations of the stimuli, indicating that location information is being coded in the same way regardless of display type. These results lead us to hypothesize that 2 different pathways are involved in processing these 2 types of pop-out displays.
}, keywords = {Attention, lateral intraparietal area, neural decoding, posterior parietal cortex, prefrontal cortex}, issn = {1047-3211}, doi = {10.1093/cercor/bhx243}, url = {https://academic.oup.com/cercor/advance-article/doi/10.1093/cercor/bhx243/4430784}, author = {Ethan Meyers and Andy Liang and Fumi Katsuki and Christos Constantinidis} } @article {2542, title = {New Data Science tools for analyzing neural data and computational models}, year = {2016}, abstract = {As the amount of data collected by neuroscientists continues to increase (Stevenson et al., 2011), new tools are needed to turn this data into insights about the algorithms that underlie complex behavior (Brown et al., 2004). Here we present our latest research on computational tools we have developed at Hampshire College and at the Center for Brains, Minds and Machines at MIT. In particular, we describe new tools for neural population decoding, including a graphical user interface to the Neural Decoding Toolbox (Meyers, 2013), methods for analyzing single neurons, and ongoing work on a parallelized population decoding framework that uses R and Apache Spark{\texttrademark} to greatly increase the speed of population decoding. We also discuss CBaaS, a distributed platform that allows one to evaluate the effectiveness of different computational models (such as different versions of deep neural networks). These tools will allow researchers to gain deeper insights from the data they collect, and to better assess whether computational models are acting in ways similar to biological systems.
}, author = {Ethan Meyers and Mike Dean and Gregory J Hale} } @article {2065, title = {Review of the CBMM workshop on the Turing++ Question: {\textquoteleft}who is there?{\textquoteright}}, year = {2016}, month = {01/2016}, abstract = {From the 3rd to the 5th of September 2015, the Center for Brains, Minds and Machines hosted a workshop to address the first Turing++ Question: {\textquoteleft}who is there?{\textquoteright}. The workshop invited experts from the fields of computer vision, cognitive science, and neuroscience to engage in a discussion of the neural algorithms and underlying neural circuits that support the ability of humans and other primates to recognize faces. The goal of the workshop was to generate new ideas about how to make progress in understanding the neural algorithms that underlie face identification...
}, author = {Ethan Meyers} } @article {2047, title = {Turing++ Questions: A Test for the Science of (Human) Intelligence.}, journal = {AI Magazine}, volume = {37}, year = {2016}, month = {03/2016}, pages = {73-77}, abstract = {It is becoming increasingly clear that there is an infinite number of definitions of intelligence. Machines that are intelligent in different narrow ways have been built since the 1950s. We are now entering a golden age for the engineering of intelligence and the development of many different kinds of intelligent machines. At the same time, there is widespread interest among scientists in understanding a specific and well-defined form of intelligence: human intelligence. For this reason we propose a stronger version of the original Turing test. In particular, we describe here an open-ended set of Turing++ Questions that we are developing at the Center for Brains, Minds and Machines at MIT {\textemdash} that is, questions about an image. Questions may range from what is there to who is there, what is this person doing, what is this girl thinking about this boy, and so on. The plural in questions is to emphasize that there are many different intelligent abilities in humans that have to be characterized, and possibly replicated in a machine, from basic visual recognition of objects, to the identification of faces, to gauging emotions, to social intelligence, to language and much more. The term Turing++ is to emphasize that our goal is understanding human intelligence at all of Marr{\textquoteright}s levels {\textemdash} from the level of the computations to the level of the underlying circuits. Answers to the Turing++ Questions should thus be given in terms of models that match human behavior and human physiology {\textemdash} the mind and the brain. These requirements are thus well beyond the original Turing test. A whole scientific field that we call the science of (human) intelligence is required to make progress in answering our Turing++ Questions. It is connected to neuroscience and to the engineering of intelligence but also separate from both of them.
}, doi = {http://dx.doi.org/10.1609/aimag.v37i1.2641}, url = {http://www.aaai.org/ojs/index.php/aimagazine/article/view/2641}, author = {Tomaso Poggio and Ethan Meyers} } @article {1773, title = {How PFC and LIP process single and multiple-object {\textquoteleft}pop-out{\textquoteright} displays}, year = {2015}, abstract = {Images in which one object is more salient than its surroundings lead to a {\textquoteleft}pop-out{\textquoteright} effect where subjects show very efficient behavioral responses to the salient object. This pop-out effect is present for displays in which: 1) a single object is on a blank background, and 2) a single object is highly distinct from other surrounding objects. Thus it is generally assumed that this pop-out effect arises from the same neural computations for both of these types of displays, and it is thought that this effect is mediated by {\textquotedblleft}bottom-up{\textquotedblright} attentional mechanisms.
To directly examine whether these two types of displays are indeed processed the same way, we recorded neural activity in LIP and PFC, two brain regions implicated in attentional processing. Using population decoding methods applied to 280 LIP and PFC neurons recorded from two monkeys, we observed that when a single isolated object is displayed, information about the object{\textquoteright}s location appeared ~10 ms earlier in LIP than in PFC, which is consistent with a feed-forward account for processing isolated objects. However, when a salient object is presented among multiple distractor objects, information about the location of the salient object was delayed by 60-90 ms in both brain regions, and information now first appeared in PFC. Despite the differences in the latency of information between the two display types, the latency of population firing rate activity was similar for both types of displays. Additionally, we see that the pattern of neural activity is very similar for both types of displays (and across different color transformations of the stimuli), indicating that information about the object{\textquoteright}s location is being coded in the same way regardless of display type. These results indicate that there is a {\textquoteleft}top-down{\textquoteright} neural component for processing pop-out displays, and that firing rate latencies can be quite distinct from the latency of when information first appears in a brain region.
}, url = {https://www.sfn.org/~/media/SfN/Documents/Annual\%20Meeting/FinalProgram/NS2015/Full\%20Abstract\%20PDFs\%202015/SfN15_Abstracts_PDF_Nanos.ashx}, author = {Ethan Meyers}, editor = {Andy Liang and Christos Constantinidis} } @article {1356, title = {Intelligent Information Loss: The Coding of Facial Identity, Head Pose, and Non-Face Information in the Macaque Face Patch System}, journal = {The Journal of Neuroscience }, volume = {35}, year = {2015}, month = {05/2015}, chapter = {7069}, abstract = {Faces are a behaviorally important class of visual stimuli for primates. Recent work in macaque monkeys has identified six discrete face areas where most neurons have higher firing rates to images of faces compared with other objects (Tsao et al., 2006). While neurons in these areas appear to have different tuning (Freiwald and Tsao, 2010; Issa and DiCarlo, 2012), exactly what types of information and, consequently, which visual behaviors neural populations within each face area can support, is unknown. Here we use population decoding to better characterize three of these face patches (ML/MF, AL, and AM). We show that neural activity in all patches contains information that discriminates between the broad categories of face and nonface objects, individual faces, and nonface stimuli. Information is present in both high and lower firing rate regimes. However, there were significant differences between the patches, with the most anterior patch showing relatively weaker representation of nonface stimuli. Additionally, we find that pose-invariant face identity information increases as one moves to more anterior patches, while information about the orientation of the head decreases. Finally, we show that all the information we can extract from the population is present in patterns of activity across neurons, and there is relatively little information in the total activity of the population. These findings give new insight into the representations constructed by the face patch system and how they are successively transformed.
}, doi = {10.1523/JNEUROSCI.3086-14.2015}, url = {http://www.ncbi.nlm.nih.gov/pubmed/25948258}, author = {Ethan Meyers and Mia Borzello and W. A. Freiwald and Doris Tsao} } @article {389, title = {The dynamics of invariant object recognition in the human visual system.}, journal = {J Neurophysiol}, volume = {111}, year = {2014}, month = {01/2014}, pages = {91-102}, abstract = {The human visual system can rapidly recognize objects despite transformations that alter their appearance. The precise timing of when the brain computes neural representations that are invariant to particular transformations, however, has not been mapped in humans. Here we employ magnetoencephalography decoding analysis to measure the dynamics of size- and position-invariant visual information development in the ventral visual stream. With this method we can read out the identity of objects beginning as early as 60 ms. Size- and position-invariant visual information appear around 125 ms and 150 ms, respectively, and both develop in stages, with invariance to smaller transformations arising before invariance to larger transformations. Additionally, the magnetoencephalography sensor activity localizes to neural sources that are in the most posterior occipital regions at the early decoding times and then move temporally as invariant information develops. These results provide previously unknown latencies for key stages of human-invariant object recognition, as well as new and compelling evidence for a feed-forward hierarchical model of invariant object recognition where invariance increases at each successive visual area along the ventral stream.
Corresponding Dataset - The dynamics of invariant object recognition in the human visual system.
}, keywords = {Adolescent, Adult, Evoked Potentials, Visual, Female, Humans, Male, Pattern Recognition, Visual, Reaction Time, visual cortex}, issn = {1522-1598}, doi = {10.1152/jn.00394.2013}, url = {http://jn.physiology.org/content/early/2013/09/27/jn.00394.2013.abstract}, author = {Leyla Isik and Ethan Meyers and JZ. Leibo and Tomaso Poggio} } @article {2288, title = {The dynamics of invariant object recognition in the human visual system.}, year = {2014}, month = {01/2014}, abstract = {This is the dataset for the corresponding journal article - The dynamics of invariant object recognition in the human visual system.
\
The human visual system can rapidly recognize objects despite transformations that alter their appearance. The precise timing of when the brain computes neural representations that are invariant to particular transformations, however, has not been mapped in humans. Here we employ magnetoencephalography decoding analysis to measure the dynamics of size- and position-invariant visual information development in the ventral visual stream. With this method we can read out the identity of objects beginning as early as 60 ms. Size- and position-invariant visual information appear around 125 ms and 150 ms, respectively, and both develop in stages, with invariance to smaller transformations arising before invariance to larger transformations. Additionally, the magnetoencephalography sensor activity localizes to neural sources that are in the most posterior occipital regions at the early decoding times and then move temporally as invariant information develops. These results provide previously unknown latencies for key stages of human-invariant object recognition, as well as new and compelling evidence for a feed-forward hierarchical model of invariant object recognition where invariance increases at each successive visual area along the ventral stream.
\
Dataset files can be downloaded here - http://dx.doi.org/10.7910/DVN/KRUPXZ
11 subjects{\textquoteright} MEG data from Isik et al., 2014. Data are available in raw .fif format or in a MATLAB raster format that is compatible with the Neural Decoding Toolbox (readout.info).
For MATLAB code to pre-process this MEG data and run the decoding analyses, please visit
https://bitbucket.org/lisik/meg_decoding
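As a rough sketch of how raster-format files can be binned for decoding with the Neural Decoding Toolbox (the directory name, output prefix, and bin settings below are placeholder assumptions, not values from this dataset):
% Convert raster-format .mat files into binned data for decoding.
% The path, prefix, and bin parameters below are placeholders.
raster_dir  = 'MEG_raster_data/';  % placeholder: directory of raster-format files
save_prefix = 'Binned_MEG_data';   % placeholder: prefix for the binned output file
bin_width   = 10;                  % placeholder: width of each bin (in samples)
step_size   = 10;                  % placeholder: sampling interval between bins
binned_file = create_binned_data_from_raster_data(raster_dir, save_prefix, bin_width, step_size);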
}, doi = {http://dx.doi.org/10.7910/DVN/KRUPXZ}, author = {Leyla Isik and Ethan Meyers and JZ. Leibo and Tomaso Poggio} } @article {391, title = {The Neural Decoding Toolbox}, year = {2013}, month = {01/2013}, abstract = {The Neural Decoding Toolbox (NDT) is a MATLAB package that makes it easy to apply population decoding analyses to neural activity. The toolbox is designed in a modular fashion, making it easy to try different analyses while keeping a core processing stream intact. Using the toolbox, one can analyze data from many different types of recording modalities, including spiking data and EEG/MEG signals. The toolbox also allows for more complex analyses, such as testing whether information is contained in a dynamic population code and assessing whether information is represented in an abstract format.
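As a minimal sketch of that core processing stream (the binned-data file name, label name, and number of cross-validation splits below are placeholder assumptions, not values tied to any particular dataset):
% Core NDT processing stream: datasource -> feature preprocessors ->
% classifier -> cross-validator. File and label names are placeholders.
ds  = basic_DS('Binned_data.mat', 'stimulus_ID', 20);  % datasource with 20 cross-validation splits
fps = {zscore_normalize_FP};                           % z-score each site using training-set statistics
cl  = max_correlation_coefficient_CL;                  % maximum correlation coefficient classifier
cv  = standard_resample_CV(ds, cl, fps);               % cross-validator that ties the modules together
DECODING_RESULTS = cv.run_cv_decoding;                 % run the full decoding analysis
Because each module shares a common interface, swapping in a different classifier or preprocessor only requires changing the corresponding object.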
}, url = {http://www.readout.info/}, author = {Ethan Meyers} }