@article{2761,
  author   = {Lin, Henry and Tegmark, Max},
  title    = {Why Does Deep and Cheap Learning Work So Well?},
  journal  = {Journal of Statistical Physics},
  volume   = {168},
  pages    = {1223--1247},
  year     = {2017},
  month    = sep,
  keywords = {Artificial neural networks, deep learning, Statistical physics},
  doi      = {10.1007/s10955-017-1836-5},
  url      = {https://link.springer.com/article/10.1007/s10955-017-1836-5},
  abstract = {We show how the success of deep learning could depend not only on mathematics but also on physics: although well-known mathematical theorems guarantee that neural networks can approximate arbitrary functions well, the class of functions of practical interest can frequently be approximated through ``cheap learning'' with exponentially fewer parameters than generic ones. We explore how properties frequently encountered in physics such as symmetry, locality, compositionality, and polynomial log-probability translate into exceptionally simple neural networks. We further argue that when the statistical process generating the data is of a certain hierarchical form prevalent in physics and machine learning, a deep neural network can be more efficient than a shallow one. We formalize these claims using information theory and discuss the relation to the renormalization group. We prove various ``no-flattening theorems'' showing when efficient linear deep networks cannot be accurately approximated by shallow ones without efficiency loss; for example, we show that $n$ variables cannot be multiplied using fewer than $2^n$ neurons in a single hidden layer.},
}