@conference {4695, title = {Learning Compositional Rules via Neural Program Synthesis}, booktitle = {Advances in Neural Information Processing Systems 33 (NeurIPS 2020)}, year = {2020}, month = {12/2020}, abstract = {

Many aspects of human reasoning, including language, require learning rules from very little data. Humans can do this, often learning systematic rules from very few examples, and combining these rules to form compositional rule-based systems. Current neural architectures, on the other hand, often fail to generalize in a compositional manner, especially when evaluated in ways that vary systematically from training. In this work, we present a neuro-symbolic model which learns entire rule systems from a small set of examples. Instead of directly predicting outputs from inputs, we train our model to induce the explicit system of rules governing a set of previously seen examples, drawing upon techniques from the neural program synthesis literature. Our rule-synthesis approach outperforms neural meta-learning techniques in three domains: an artificial instruction-learning domain used to evaluate human learning, the SCAN challenge datasets, and learning rule-based translations of number words into integers for a wide range of human languages.

Code can be found at this https URL.
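As a rough illustration of what an induced rule system can look like (a sketch only; the grammar, rule format, and helper names are hypothetical and loosely SCAN-flavored, not the paper's actual representation), the following Python snippet interprets a phrase by recursively applying explicit rewrite rules instead of predicting outputs directly:

# A toy, hand-written rule system of the sort a synthesizer might induce from examples.
# Rule order sets precedence here: splitting on "and" before "twice" makes "twice"
# scope over the nearer sub-phrase.
RULES = [
    ("jump", ["JUMP"]),           # primitive words rewrite to output tokens
    ("walk", ["WALK"]),
    ("x1 and x2", ["x1", "x2"]),  # composite rules with variables x1, x2
    ("x1 twice", ["x1", "x1"]),
]

def match(pattern, tokens):
    """Match a rule pattern against tokens; variables bind nonempty spans."""
    if not pattern:
        return {} if not tokens else None
    head, rest = pattern[0], pattern[1:]
    if head.startswith("x"):                       # variable
        for i in range(1, len(tokens) + 1):
            bindings = match(rest, tokens[i:])
            if bindings is not None:
                bindings[head] = " ".join(tokens[:i])
                return bindings
        return None
    if tokens and tokens[0] == head:               # literal word
        return match(rest, tokens[1:])
    return None

def interpret(phrase, rules):
    """Apply the first matching rule, recursively expanding bound variables."""
    for pattern, template in rules:
        bindings = match(pattern.split(), phrase.split())
        if bindings is None:
            continue
        output = []
        for symbol in template:
            if symbol in bindings:
                output.extend(interpret(bindings[symbol], rules))
            else:
                output.append(symbol)
        return output
    raise ValueError(f"no rule matches: {phrase!r}")

print(interpret("jump and walk twice", RULES))  # ['JUMP', 'WALK', 'WALK']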

}, url = {https://proceedings.neurips.cc/paper/2020/hash/7a685d9edd95508471a9d3d6fcace432-Abstract.html}, author = {Maxwell Nye and Armando Solar-Lezama and Joshua B. Tenenbaum and Brenden M. Lake} }

@proceedings {4384, title = {Write, Execute, Assess: Program Synthesis with a REPL}, year = {2019}, month = {11/2019}, address = {Vancouver, Canada}, abstract = {

We present a neural program synthesis approach integrating components which write, execute, and assess code to navigate the search space of possible programs. We equip the search process with an interpreter or a read-eval-print-loop (REPL), which immediately executes partially written programs, exposing their semantics. The REPL addresses a basic challenge of program synthesis: tiny changes in syntax can lead to huge changes in semantics. We train a pair of models, a policy that proposes the new piece of code to write, and a value function that assesses the prospects of the code written so far. At test time we can combine these models with a Sequential Monte Carlo algorithm. We apply our approach to two domains: synthesizing text editing programs and inferring 2D and 3D graphics programs.
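A minimal sketch of the write-execute-assess loop described above, under simplifying assumptions: a toy string-editing DSL stands in for the paper's domains, the learned policy and value networks are replaced by a uniform proposal and an edit-similarity heuristic, and the Sequential Monte Carlo step is reduced to plain weighted resampling. The op names and helpers are hypothetical, not the authors' implementation.

import random
from difflib import SequenceMatcher

# Toy DSL of string-editing ops; each op maps a string to a string.
OPS = {
    "upper":   str.upper,
    "lower":   str.lower,
    "strip_s": lambda s: s.rstrip("s"),
    "add_ing": lambda s: s + "ing",
}

def execute(program, inp):
    """The REPL: run a (possibly partial) program and expose its semantics."""
    out = inp
    for op in program:
        out = OPS[op](out)
    return out

def value(program, examples):
    """Assess: heuristic score of a partial program (stand-in for the value network)."""
    total = 0.0
    for inp, target in examples:
        # Case-insensitive edit similarity between the current output and the target.
        total += SequenceMatcher(None, execute(program, inp).lower(), target.lower()).ratio()
    return total / len(examples)

def policy(program):
    """Write: propose the next op (stand-in for the learned policy network)."""
    return random.choice(list(OPS))

def search(examples, particles=50, steps=4, seed=0):
    """Maintain a population of partial programs; write, execute, assess, resample."""
    random.seed(seed)
    population = [[] for _ in range(particles)]
    for _ in range(steps):
        # Write: each particle proposes one more line of code.
        population = [p + [policy(p)] for p in population]
        # Execute + assess: return any particle that already solves all examples.
        for p in population:
            if all(execute(p, i) == t for i, t in examples):
                return p
        # Resample particles in proportion to their value (a crude SMC step).
        weights = [value(p, examples) for p in population]
        population = random.choices(population, weights=weights, k=particles)
    return max(population, key=lambda p: value(p, examples))

examples = [("run", "RUNNING"), ("walk", "WALKING")]
print(search(examples))  # typically ['add_ing', 'upper']; stochastic, may vary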

}, author = {Kevin Ellis and Maxwell Nye and Yewen Pu and Felix Sosa and Joshua B. Tenenbaum and Armando Solar-Lezama} }