@article{4185,
  title    = {Theory I: Deep networks and the curse of dimensionality},
  journal  = {Bulletin of the Polish Academy of Sciences: Technical Sciences},
  volume   = {66},
  year     = {2018},
  abstract = {We review recent work characterizing the classes of functions for which deep learning can be exponentially better than shallow learning. Deep convolutional networks satisfy these conditions as a special case, though weight sharing is not the main reason for their exponential advantage.},
  keywords = {convolutional neural networks, deep and shallow networks, deep learning, function approximation},
  author   = {Tomaso Poggio and Qianli Liao}
}