@incollection{449,
  title     = {Concepts in a {Probabilistic Language of Thought}},
  booktitle = {The Conceptual Mind: New Directions in the Study of Concepts},
  editor    = {Margolis, Eric and Laurence, Stephen},
  publisher = {The MIT Press},
  number    = {010},
  year      = {2014},
  month     = jun,
  abstract  = {Knowledge organizes our understanding of the world, determining what we expect given what we have already seen. Our predictive representations have two key properties: they are productive, and they are graded. Productive generalization is possible because our knowledge decomposes into concepts{\textemdash}elements of knowledge that are combined and recombined to describe particular situations. Gradedness is the observable effect of accounting for uncertainty{\textemdash}our knowledge encodes degrees of belief that lead to graded probabilistic predictions. To put this a different way, concepts form a combinatorial system that enables description of many different situations; each such situation specifies a distribution over what we expect to see in the world, given what we have seen. We may think of this system as a probabilistic language of thought (PLoT) in which representations are built from language-like composition of concepts and the content of those representations is a probability distribution on world states. The purpose of this chapter is to formalize these ideas in computational terms, to illustrate key properties of the PLoT approach with a concrete example, and to draw connections with other views of conceptual structure.},
  note      = {The book chapter is reprinted courtesy of The MIT Press, from the forthcoming edited collection {\textquotedblleft}The Conceptual Mind: New Directions in the Study of Concepts{\textquotedblright} edited by Eric Margolis and Stephen Laurence, print date Spring 2015.},
  keywords  = {Development of Intelligence},
  author    = {Goodman, Noah D. and Tenenbaum, Joshua B. and Gerstenberg, Tobias},
}