@proceedings{2793,
  title = {Markov transitions between attractor states in a recurrent neural network},
  year = {2017},
  abstract = {Stochasticity is an essential part of explaining the world. Increasingly, neuroscientists and cognitive scientists are identifying mechanisms whereby the brain uses probabilistic reasoning in representational, predictive, and generative settings. But stochasticity is not always useful: robust perception and memory retrieval require representations that are immune to corruption by stochastic noise. In an effort to combine these robust representations with stochastic computation, we present an architecture that generalizes traditional recurrent attractor networks to follow probabilistic Markov dynamics between stable and noise-resistant fixed points.},
  author = {Ishita Dasgupta and Jeremy Bernstein and David Rolnick and Haim Sompolinsky}
}