@article{4734,
  title         = {From Associative Memories to Powerful Machines},
  year          = {2021},
  month         = jan,
  abstract      = {Associative memories were implemented as simple networks of
                   threshold neurons by Willshaw and Longuet-Higgins in the '60s.
                   Today's deep networks are quite similar: they can be regarded as
                   approximating look-up tables, similar to Gaussian RBF networks.
                   Thinking about deep networks as large associative memories
                   provides a more realistic and sober perspective on the promises
                   of deep learning. Such associative networks are not powerful
                   enough to account for intelligent abilities such as language or
                   logic. Could evolution have discovered how to go beyond simple
                   reflexes and associative memories? I will discuss how inventions
                   such as recurrence and hidden states can transform look-up
                   tables in powerful computing machines. In a July 2022 update I
                   outline a theory framework explaining how deep networks may
                   work, including transformers. The framework is based on proven
                   results plus a couple of conjectures -- still open.},
  author        = {Poggio, Tomaso},
  internal-note = {NOTE(review): @article requires a journal field, but no
                   venue is recorded in the source entry -- presumably a talk
                   or CBMM memo; confirm and add journal or change entry type},
}