@article{1861,
  title    = {A {Bayesian} nonparametric approach for uncovering rat hippocampal population codes during spatial navigation},
  journal  = {Journal of Neuroscience Methods},
  volume   = {263},
  year     = {2016},
  type     = {Computational Neuroscience},
  chapter  = {36},
  abstract = {Rodent hippocampal population codes represent important spatial information about the environment during navigation. Several computational methods have been developed to uncover the neural representation of spatial topology embedded in rodent hippocampal ensemble spike activity. Here we extend our previous work and propose a nonparametric Bayesian approach to infer rat hippocampal population codes during spatial navigation. To tackle the model selection problem, we leverage a nonparametric Bayesian model. Specifically, to analyze rat hippocampal ensemble spiking activity, we apply a hierarchical Dirichlet process-hidden Markov model (HDP-HMM) using two Bayesian inference methods, one based on Markov chain Monte Carlo (MCMC) and the other based on variational Bayes (VB). We demonstrate the effectiveness of our Bayesian approaches on recordings from a freely-behaving rat navigating in an open field environment. We find that MCMC-based inference with Hamiltonian Monte Carlo (HMC) hyperparameter sampling is flexible and efficient, and outperforms VB and MCMC approaches with hyperparameters set by empirical Bayes.},
  author   = {Linderman, Scott W. and Johnson, Matthew J. and Wilson, Matthew A. and Chen, Zhe},
}

@inproceedings{2771,
  title     = {Bayesian nonparametric methods for discovering latent structures of rat hippocampal ensemble spikes},
  booktitle = {{IEEE} Workshop on Machine Learning for Signal Processing},
  year      = {2016},
  month     = sep,
  address   = {Salerno, Italy},
  author    = {Chen, Zhe and Linderman, Scott W. and Wilson, Matthew A.},
}

@misc{1096,
  title        = {Discovering Switching Autoregressive Dynamics in Neural Spike Train Recordings},
  year         = {2015},
  howpublished = {Computational and Systems Neuroscience (Cosyne) Abstracts},
  address      = {Salt Lake City, UT, USA},
  abstract     = {Generalized linear models (GLM) are powerful tools for identifying dependence in spiking populations of neurons, both over time and within the population (Paninski,\ 2004). The GLM identifies these dependencies by modeling spiking patterns through a linear regression and an appropriately-selected link function and likelihood. This regression setup is appealing for its simplicity, the wide variety of available priors, the potential for interpretability, and its computational efficiency. However, the GLM suffers from at least three notable deficiencies. First, the model is linear up to the link function, which only allows a limited range of response maps from neural spiking histories. Second, the model{\textquoteright}s parameters are fixed over time, while neural responses may vary due to processes that are exogenous to the population. Third, the generalized linear model presupposes a characteristic time scale for all dynamics, when there may be multiple, varying time scales of neural activity in a given population. Here we seek to address these deficiencies via a switching variant of the generalized linear model. A switching system is one that evolves through a set of discrete states over time, with each state exhibiting its own low-level dynamics. For example, the latent state of a hidden Markov model (HMM) can be used to determine the parameters of an autoregressive (AR) process. These HMM-AR models can be used to identify common patterns of linear dependence that vary over time. Bayesian nonparametric versions of HMM-AR models extend these ideas to allow for an infinite number of such patterns to exist a priori, and semi-Markov variants allow the different states to have idiosyncratic duration distributions. Here we develop GLM variants of these switching AR processes and specialize them for neural spiking data. In particular, we exploit recent data augmentation schemes for negative binomial likelihood functions (Pillow and Scott,\ 2012) to make inference tractable in HDP-HSMM-AR models with count-based observations.},
  author       = {Johnson, Matthew J. and Linderman, Scott W. and Datta, Sandeep R. and Adams, Ryan},
}

@misc{1095,
  title        = {Inferring structured connectivity from spike trains under negative-binomial generalized linear models},
  year         = {2015},
  howpublished = {Computational and Systems Neuroscience (Cosyne) Abstracts},
  address      = {Salt Lake City, UT, USA},
  abstract     = {The steady expansion of neural recording capability provides exciting opportunities for discovering unexpected patterns and gaining new insights into neural computation. Realizing these gains requires flexible and accurate yet tractable statistical methods for extracting structure from large-scale neural recordings. Here we present a model for simultaneously recorded multi-neuron spike trains with negative binomial spiking and structured patterns of functional coupling between neurons. We use a generalized linear model (GLM) with negative-binomial observations to describe spike trains, which provides a flexible model for over-dispersed spike counts (i.e., responses with greater-than-Poisson variability), and introduce flexible priors over functional coupling kernels derived from sparse random network models. The coupling kernels capture dependencies between neurons by allowing spiking activity in each neuron to influence future spiking activity in its neighbors. However, these dependencies tend to be sparse, and to have additional structure that is not exploited by standard (e.g., group lasso) regularization methods. For example, neurons may belong to different classes, as is often found in the retina, or they may be characterized by a small number of features, such as a preferred stimulus selectivity. These latent variables lend interpretability to otherwise incomprehensible data. To incorporate these concepts, we decompose the coupling kernels with a weighted network, and leverage latent variable models like the Erd{\H{o}}s-R{\'e}nyi model, stochastic block model, and the latent feature model as priors over the interactions. To perform inference, we exploit recent innovations in negative binomial regression to perform efficient, fully-Bayesian sampling of the posterior distribution over parameters given the data. This provides access to the full posterior distribution over connectivity, and allows underlying network variables to be inferred alongside the low-dimensional latent variables of each neuron. We apply the model to neural data from primate retina and show that it recovers interpretable patterns of interaction between different cell types.},
  author       = {Linderman, Scott W. and Adams, Ryan and Pillow, Jonathan},
}

@misc{461,
  title    = {Abstracts of the 2014 {Brains, Minds, and Machines} Summer Course},
  number   = {024},
  year     = {2014},
  month    = sep,
  abstract = {A compilation of abstracts from the student projects of the 2014 Brains, Minds, and Machines Summer School, held at Woods Hole Marine Biological Lab, May 29 - June 12, 2014.},
  author   = {Amir, Nadav and Besold, Tarek R. and Camoriano, Raffaello and Erdogan, Goker and Flynn, Thomas and Gillary, Grant and Gomez, Jesse and Herbert-Voss, Ariel and Hotan, Gladia and Kadmon, Jonathan and Linderman, Scott W. and Liu, Tina T. and Marantan, Andrew and Olson, Joseph and Orchard, Garrick and Pal, Dipan K. and Pasquale, Giulia and Sanders, Honi and Silberer, Carina and Smith, Kevin A. and de Briton, Carlos Stein N. and Suchow, Jordan W. and Tessler, M. H. and Viejo, Guillaume and Walker, Drew and Wehbe, Leila and Barbu, Andrei and Isik, Leyla and Mackevicius, Emily and Meroz, Yasmine},
}

@inproceedings{1055,
  title     = {A framework for studying synaptic plasticity with neural spike train data},
  booktitle = {Neural Information Processing Systems},
  year      = {2014},
  month     = dec,
  abstract  = {Learning and memory in the brain are implemented by complex, time-varying changes in neural circuitry. The computational rules according to which synaptic weights change over time are the subject of much research, and are not precisely understood. Until recently, limitations in experimental methods have made it challenging to test hypotheses about synaptic plasticity on a large scale. However, as such data become available and these barriers are lifted, it becomes necessary to develop analysis techniques to validate plasticity models. Here, we present a highly extensible framework for modeling arbitrary synaptic plasticity rules on spike train data in populations of interconnected neurons. We treat synaptic weights as a (potentially nonlinear) dynamical system embedded in a fully-Bayesian generalized linear model (GLM). In addition, we provide an algorithm for inferring synaptic weight trajectories alongside the parameters of the GLM and of the learning rules. Using this method, we perform model comparison of two proposed variants of the well-known spike-timing-dependent plasticity (STDP) rule, where nonlinear effects play a substantial role. On synthetic data generated from the biophysical simulator NEURON, we show that we can recover the weight trajectories, the pattern of connectivity, and the underlying learning rules.},
  author    = {Linderman, Scott W. and Stock, Christopher and Adams, Ryan},
}

@misc{458,
  title    = {A Nonparametric {Bayesian} Approach to Uncovering Rat Hippocampal Population Codes During Spatial Navigation},
  number   = {027},
  year     = {2014},
  month    = nov,
  abstract = {Rodent hippocampal population codes represent important spatial information about the environment during navigation. Several computational methods have been developed to uncover the neural representation of spatial topology embedded in rodent hippocampal ensemble spike activity. Here we extend our previous work and propose a nonparametric Bayesian approach to infer rat hippocampal population codes during spatial navigation. To tackle the model selection problem, we leverage a nonparametric Bayesian model. Specifically, to analyze rat hippocampal ensemble spiking activity, we apply a hierarchical Dirichlet process-hidden Markov model (HDP-HMM) using two Bayesian inference methods, one based on Markov chain Monte Carlo (MCMC) and the other based on variational Bayes (VB). We demonstrate the effectiveness of our Bayesian approaches on recordings from a freely-behaving rat navigating in an open field environment. We find that MCMC-based inference with Hamiltonian Monte Carlo (HMC) hyperparameter sampling is flexible and efficient, and outperforms VB and MCMC approaches with hyperparameters set by empirical Bayes.},
  author   = {Linderman, Scott W. and Johnson, Matthew J. and Wilson, Matthew A. and Chen, Zhe},
}