@article {4880, title = {Dynamics and Neural Collapse in Deep Classifiers trained with the Square Loss}, year = {2021}, abstract = {

We review several properties -- old and new -- of training overparametrized deep networks under the square loss. We first consider a model of the dynamics of gradient flow under the square loss in deep homogeneous ReLU networks. We study the convergence to a solution with the absolute minimum $\rho$, defined as the product of the Frobenius norms of the weight matrices of each layer, when normalization by Lagrange multipliers (LM) is used together with Weight Decay (WD) under different forms of gradient descent. A key property of the minimizers that bounds their expected error {\it for a specific network architecture} is $\rho$. In particular, we derive novel norm-based bounds for convolutional layers that are orders of magnitude better than classical bounds for dense networks. Next we prove that quasi-interpolating solutions obtained by Stochastic Gradient Descent (SGD) in the presence of WD have a bias towards low-rank weight matrices, which, as we also explain, should improve generalization. The same analysis predicts the existence of an inherent SGD noise for deep networks. In both cases, we verify our predictions experimentally. We then predict Neural Collapse and its properties without any specific assumption -- unlike other published proofs. Our analysis supports the idea that the advantage of deep networks relative to other classifiers is greater for problems that are appropriate for sparse deep architectures such as CNNs. The deeper reason is that compositionally sparse target functions can be approximated well by ``sparse'' deep networks without incurring the curse of dimensionality.

}, author = {M. Xu and Akshay Rangamani and Andrzej Banburski and Q. Liao and Tomer Galanti and Tomaso Poggio} }
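The complexity measure $\rho$ named in the abstract (the product of the per-layer Frobenius norms) can be illustrated with a minimal sketch; the following Python/NumPy snippet is not from the paper, and the layer sizes, initialization, and function names are assumptions made purely for illustration of a toy bias-free homogeneous ReLU network.

    # Illustrative sketch (not from the paper): compute rho, the product of the
    # Frobenius norms of the layer weight matrices of a bias-free ReLU network.
    # Layer sizes and random initialization are assumptions for this example.
    import numpy as np

    rng = np.random.default_rng(0)

    weights = [
        rng.standard_normal((64, 32)),   # layer 1: R^32 -> R^64
        rng.standard_normal((32, 64)),   # layer 2: R^64 -> R^32
        rng.standard_normal((10, 32)),   # output layer: R^32 -> R^10
    ]

    def rho(weight_matrices):
        # Product of the Frobenius norms of the layer weight matrices.
        return np.prod([np.linalg.norm(W, ord="fro") for W in weight_matrices])

    def forward(x, weight_matrices):
        # Bias-free forward pass with ReLU on the hidden layers only.
        for W in weight_matrices[:-1]:
            x = np.maximum(W @ x, 0.0)
        return weight_matrices[-1] @ x

    print("rho =", rho(weights))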