@conference {4695, title = {Learning Compositional Rules via Neural Program Synthesis}, booktitle = {Advances in Neural Information Processing Systems 33 (NeurIPS 2020)}, year = {2020}, month = {12/2020}, abstract = {

Many aspects of human reasoning, including language, require learning rules from very little data. Humans can do this, often learning systematic rules from very few examples, and combining these rules to form compositional rule-based systems. Current neural architectures, on the other hand, often fail to generalize in a compositional manner, especially when evaluated in ways that vary systematically from training. In this work, we present a neuro-symbolic model which learns entire rule systems from a small set of examples. Instead of directly predicting outputs from inputs, we train our model to induce the explicit system of rules governing a set of previously seen examples, drawing upon techniques from the neural program synthesis literature. Our rule-synthesis approach outperforms neural meta-learning techniques in three domains: an artificial instruction-learning domain used to evaluate human learning, the SCAN challenge datasets, and learning rule-based translations of number words into integers for a wide range of human languages.

}, url = {https://proceedings.neurips.cc/paper/2020/hash/7a685d9edd95508471a9d3d6fcace432-Abstract.html}, author = {Maxwell Nye and Armando Solar-Lezama and Joshua B. Tenenbaum and Brenden M. Lake} } @article {3441, title = {Building machines that learn and think like people}, journal = {Behavioral and Brain Sciences}, volume = {40}, year = {2017}, month = {01/2017}, pages = {e253}, abstract = {

Recent progress in artificial intelligence has renewed interest in building systems that learn and think like people. Many advances have come from using deep neural networks trained end-to-end in tasks such as object recognition, video games, and board games, achieving performance that equals or even beats that of humans in some respects. Despite their biological inspiration and performance achievements, these systems differ from human intelligence in crucial ways. We review progress in cognitive science suggesting that truly human-like learning and thinking machines will have to reach beyond current engineering trends in both what they learn and how they learn it. Specifically, we argue that these machines should (1) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (2) ground learning in intuitive theories of physics and psychology to support and enrich the knowledge that is learned; and (3) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations. We suggest concrete challenges and promising routes toward these goals that can combine the strengths of recent neural network advances with more structured cognitive models.

}, issn = {1469-1825}, doi = {10.1017/S0140525X16001837}, url = {https://www.cambridge.org/core/journals/behavioral-and-brain-sciences/article/building-machines-that-learn-and-think-like-people/A9535B1D745A0377E16C590E14B94993/core-reader}, author = {Brenden M. Lake and Tomer D. Ullman and Joshua B. Tenenbaum and Samuel J. Gershman} } @article {1984, title = {Building machines that learn and think like people}, journal = {arXiv preprint arXiv:1604.00289}, year = {2016}, month = {04/2016}, abstract = {

Recent progress in artificial intelligence (AI) has renewed interest in building systems that learn and think like people. Many advances have come from using deep neural networks trained end-to-end in tasks such as object recognition, video games, and board games, achieving performance that equals or even beats humans in some respects. Despite their biological inspiration and performance achievements, these systems differ from human intelligence in crucial ways. We review progress in cognitive science suggesting that truly human-like learning and thinking machines will have to reach beyond current engineering trends in both what they learn, and how they learn it. Specifically, we argue that these machines should (a) build causal models of the world that support explanation and understanding, rather than merely solving pattern recognition problems; (b) ground learning in intuitive theories of physics and psychology, to support and enrich the knowledge that is learned; and (c) harness compositionality and learning-to-learn to rapidly acquire and generalize knowledge to new tasks and situations. We suggest concrete challenges and promising routes towards these goals that can combine the strengths of recent neural network advances with more structured cognitive models.

}, author = {Brenden M. Lake and Tomer D. Ullman and Joshua B. Tenenbaum and Samuel J. Gershman} } @article {1566, title = {Human-level concept learning through probabilistic program induction}, journal = {Science}, volume = {350}, year = {2015}, month = {12/11/2015}, pages = {1332--1338}, abstract = {

People learning new concepts can often generalize successfully from just a single example, yet machine learning algorithms typically require tens or hundreds of examples to perform with similar accuracy. People can also use learned concepts in richer ways than conventional algorithms{\textemdash}for action, imagination, and explanation. We present a computational model that captures these human learning abilities for a large class of simple visual concepts: handwritten characters from the world{\textquoteright}s alphabets. The model represents concepts as simple programs that best explain observed examples under a Bayesian criterion. On a challenging one-shot classification task, the model achieves human-level performance while outperforming recent deep learning approaches. We also present several {\textquotedblleft}visual Turing tests{\textquotedblright} probing the model{\textquoteright}s creative generalization abilities, which in many cases are indistinguishable from human behavior.

}, keywords = {Machine Learning}, doi = {10.1126/science.aab3050}, url = {http://www.sciencemag.org/content/350/6266/1332.short}, author = {Brenden M. Lake and Ruslan Salakhutdinov and Joshua B. Tenenbaum} }