@article {5358, title = {Emotion prediction as computation over a generative theory of mind}, journal = {Philosophical Transactions of the Royal Society A: Mathematical, Physical and Engineering Sciences}, volume = {381}, year = {2023}, month = {07/2023}, abstract = {

From sparse descriptions of events, observers can make systematic and nuanced predictions of what emotions the people involved will experience. We propose a formal model of emotion prediction in the context of a public high-stakes social dilemma. This model uses inverse planning to infer a person{\textquoteright}s beliefs and preferences, including social preferences for equity and for maintaining a good reputation. The model then combines these inferred mental contents with the event to compute {\textquoteleft}appraisals{\textquoteright}: whether the situation conformed to the expectations and fulfilled the preferences. We learn functions mapping computed appraisals to emotion labels, allowing the model to match human observers{\textquoteright} quantitative predictions of 20 emotions, including joy, relief, guilt and envy. Model comparison indicates that inferred monetary preferences are not sufficient to explain observers{\textquoteright} emotion predictions; inferred social preferences are factored into predictions for nearly every emotion. Human observers and the model both use minimal individualizing information to adjust predictions of how different people will respond to the same event. Thus, our framework integrates inverse planning, event appraisals and emotion concepts in a single computational model to reverse-engineer people{\textquoteright}s intuitive theory of emotions.

}, keywords = {affective computing, emotion, inverse planning, probabilistic generative model, social intelligence, theory of mind}, issn = {1364-503X}, doi = {10.1098/rsta.2022.0047}, url = {https://royalsocietypublishing.org/doi/10.1098/rsta.2022.0047}, author = {Houlihan, Sean Dae and Kleiman-Weiner, Max and Hewitt, Luke B. and Tenenbaum, Joshua B. and Saxe, Rebecca} }

@article {4655, title = {The logic of universalization guides moral judgment}, journal = {Proceedings of the National Academy of Sciences (PNAS)}, year = {2020}, month = {Feb-10-2020}, pages = {202014505}, abstract = {

To explain why an action is wrong, we sometimes say, {\textquotedblleft}What if everybody did that?{\textquotedblright} In other words, even if a single person{\textquoteright}s behavior is harmless, that behavior may be wrong if it would be harmful once universalized. We formalize the process of universalization in a computational model, test its quantitative predictions in studies of human moral judgment, and distinguish it from alternative models. We show that adults spontaneously make moral judgments consistent with the logic of universalization, and report comparable patterns of judgment in children. We conclude that, alongside other well-characterized mechanisms of moral judgment, such as outcome-based and rule-based thinking, the logic of universalization holds an important place in our moral minds.

}, issn = {0027-8424}, doi = {10.1073/pnas.2014505117}, url = {http://www.pnas.org/lookup/doi/10.1073/pnas.2014505117}, author = {Levine, Sydney and Kleiman-Weiner, Max and Schulz, Laura and Tenenbaum, Joshua B. and Cushman, Fiery A.} }

@proceedings {4387, title = {Finding Friend and Foe in Multi-Agent Games}, year = {2019}, month = {05/2019}, address = {Vancouver, Canada}, abstract = {

AI for multi-agent games like Go, Poker, and Dota has made great strides in recent years. Yet none of these games address the real-life challenge of cooperation in the presence of unknown and uncertain teammates. This challenge is a key game mechanism in hidden role games. Here we develop DeepRole, a multi-agent reinforcement learning agent that we test on The Resistance: Avalon, the most popular hidden role game. DeepRole combines counterfactual regret minimization (CFR) with deep value networks trained through self-play. Our algorithm integrates deductive reasoning into vector-form CFR to reason about joint beliefs and deduce partially observable actions. We augment deep value networks with constraints that yield interpretable representations of win probabilities. These innovations enable DeepRole to scale to the full Avalon game. Empirical game-theoretic methods show that DeepRole outperforms other hand-crafted and learned agents in five-player Avalon. DeepRole also played with and against human players on the web in hybrid human-agent teams; we find that it outperforms human players as both a cooperator and a competitor.

}, author = {Serrino, Jack and Kleiman-Weiner, Max and Parkes, David C. and Tenenbaum, Joshua B.} }

@article {3514, title = {Lucky or clever? From changed expectations to attributions of responsibility}, journal = {Cognition}, year = {2018}, month = {08/2018}, author = {Gerstenberg, Tobias and Ullman, Tomer D. and Nagel, Jonas and Kleiman-Weiner, Max and Lagnado, D. A. and Tenenbaum, Joshua B.} }

@conference {2590, title = {Coordinate to cooperate or compete: abstract goals and joint intentions in social interaction}, booktitle = {Proceedings of the 38th Annual Conference of the Cognitive Science Society}, year = {2016}, author = {Kleiman-Weiner, Max and Ho, Mark K. and Austerweil, Joe L. and Littman, Michael L. and Tenenbaum, Joshua B.} }