@article {3513, title = {Learning physical parameters from dynamic scenes}, journal = {Cognitive Psychology}, volume = {104}, year = {2018}, month = {08/2018}, pages = {57-82}, abstract = {

Humans acquire their most basic physical concepts early in development, and continue to enrich and expand their intuitive physics throughout life as they are exposed to more and varied dynamical environments. We introduce a hierarchical Bayesian framework to explain how people can learn physical parameters at multiple levels. In contrast to previous Bayesian models of theory acquisition (Tenenbaum et al., 2011), we work with more expressive probabilistic program representations suitable for learning the forces and properties that govern how objects interact in dynamic scenes unfolding over time. We compare our model to human learners on a challenging task of estimating multiple physical parameters in novel microworlds given short movies. This task requires people to reason simultaneously about multiple interacting physical laws and properties. People are generally able to learn in this setting and are consistent in their judgments. Yet they also make systematic errors indicative of the approximations people might make in solving this computationally demanding problem with limited computational resources. We propose two approximations that complement the top-down Bayesian approach. One approximation model relies on a more bottom-up feature-based inference scheme. The second approximation combines the strengths of the bottom-up and top-down approaches, by taking the feature-based inference as its point of departure for a search in physical-parameter space.

}, keywords = {intuitive physics, intuitive theory, learning, physical reasoning, probabilistic inference}, doi = {10.1016/j.cogpsych.2017.05.006}, url = {https://www.sciencedirect.com/science/article/pii/S0010028517301822}, author = {Tomer D. Ullman and Andreas Stuhlm{\"u}ller and Noah D. Goodman and Joshua B. Tenenbaum} } @article {3088, title = {Eye-Tracking Causality}, journal = {Psychological Science}, volume = {28}, year = {2017}, month = {10/2017}, abstract = {

How do people make causal judgments? What role, if any, does counterfactual simulation play? Counterfactual theories of causal judgments predict that people compare what actually happened with what would have happened if the candidate cause had been absent. Process theories predict that people focus only on what actually happened, to assess the mechanism linking candidate cause and outcome. We tracked participants{\textquoteright} eye movements while they judged whether one billiard ball caused another one to go through a gate or prevented it from going through. Both participants{\textquoteright} looking patterns and their judgments demonstrated that counterfactual simulation played a critical role. Participants simulated where the target ball would have gone if the candidate cause had been removed from the scene. The more certain participants were that the outcome would have been different, the stronger the causal judgments. These results provide the first direct evidence for spontaneous counterfactual simulation in an important domain of high-level cognition.

}, keywords = {causality, counterfactuals, eye tracking, intuitive physics, mental simulation, open data, open materials}, issn = {0956-7976}, doi = {10.1177/0956797617713053}, url = {http://journals.sagepub.com/doi/10.1177/0956797617713053}, author = {Tobias Gerstenberg and M.F. Peterson and Noah D. Goodman and D. A. Lagnado and Joshua B. Tenenbaum} } @article {1823, title = {Pragmatic Reasoning through Semantic Inference}, journal = {Semantics and Pragmatics}, volume = {9}, year = {2016}, abstract = {

A number of recent proposals have used techniques from game theory and Bayesian cognitive science to formalize Gricean pragmatic reasoning (Franke 2009, Frank \& Goodman 2012, Goodman \& Stuhlm{\"u}ller 2013, J{\"a}ger 2012). We discuss two phenomena which pose a challenge to these accounts of pragmatics: M-implicatures (Horn 1984) and embedded implicatures which violate Hurford{\textquoteright}s constraint (Hurford 1974, Chierchia et al. 2012). While techniques have been developed for deriving M-implicatures, Hurford-violating embedded implicatures pose a more fundamental challenge, because of basic limitations in the models{\textquoteright} architecture. In order to explain these phenomena, we propose a realignment of the division between semantic content and pragmatic content. Under this proposal, the semantic content of an utterance is not fixed independent of pragmatic inference; rather, pragmatic inference partially determines an utterance{\textquoteright}s semantic content. We show how semantic inference can be realized as an extension to the Rational Speech Acts framework (Goodman \& Stuhlm{\"u}ller 2013). The addition of lexical uncertainty derives both M-implicatures and the relevant embedded implicatures, and preserves the derivations of more standard implicatures. We use this principle to explain a novel class of implicature, non-convex disjunctive implicatures, which have several theoretically interesting properties. In particular, these implicatures can be preserved in downward-entailing contexts in the absence of accenting, a property which is predicted by lexical uncertainty, but which violates prior generalizations in the literature (Horn 1989, Fox \& Spector Forthcoming).


}, issn = {1937-8912}, doi = {10.3765/sp.9.20}, url = {http://semprag.org/article/view/sp.9.20}, author = {Leon Bergen and Roger Levy and Noah D. Goodman} } @proceedings {755, title = {How, whether, why: Causal judgments as counterfactual contrasts}, year = {2015}, month = {07/22/2015}, pages = {782-787}, address = {Pasadena, CA}, isbn = {978-0-9911967-2-2}, url = {https://mindmodeling.org/cogsci2015/papers/0142/index.html}, author = {Tobias Gerstenberg and Noah D. Goodman and D. A. Lagnado and Joshua B. Tenenbaum} } @article {449, title = {Concepts in a Probabilistic Language of Thought}, number = {010}, year = {2014}, month = {06/2014}, abstract = {

Knowledge organizes our understanding of the world, determining what we expect given what we have already seen. Our predictive representations have two key properties: they are productive, and they are graded. Productive generalization is possible because our knowledge decomposes into concepts{\textemdash}elements of knowledge that are combined and recombined to describe particular situations. Gradedness is the observable effect of accounting for uncertainty{\textemdash}our knowledge encodes degrees of belief that lead to graded probabilistic predictions. To put this a different way, concepts form a combinatorial system that enables description of many different situations; each such situation specifies a distribution over what we expect to see in the world, given what we have seen. We may think of this system as a probabilistic language of thought (PLoT) in which representations are built from language-like composition of concepts and the content of those representations is a probability distribution on world states. The purpose of this chapter is to formalize these ideas in computational terms, to illustrate key properties of the PLoT approach with a concrete example, and to draw connections with other views of conceptual structure.

Note: The book chapter is reprinted courtesy of The MIT Press, from the forthcoming edited collection {\textquotedblleft}The Conceptual Mind: New Directions in the Study of Concepts{\textquotedblright} edited by Eric Margolis and Stephen Laurence, print date Spring 2015.

}, keywords = {Development of Intelligence}, author = {Noah D. Goodman and Joshua B. Tenenbaum and Tobias Gerstenberg} } @conference {1320, title = {The strategic use of noise in pragmatic reasoning}, year = {2014}, author = {Leon Bergen and Noah D. Goodman} }