@conference{2491,
  title = {Active Video Summarization: Customized Summaries via On-line Interaction},
  booktitle = {AAAI Conference on Artificial Intelligence},
  year = {2017},
  author = {Garcia del Molino, A. and X Boix and Lim, J. and Tan, A.}
}

@article{2485,
  title = {Eccentricity Dependent Deep Neural Networks for Modeling Human Vision},
  year = {2017},
  author = {Gemma Roig and Francis Chen and X Boix and Tomaso Poggio}
}

@conference{2487,
  title = {Eccentricity Dependent Deep Neural Networks: Modeling Invariance in Human Vision},
  booktitle = {AAAI Spring Symposium Series, Science of Intelligence},
  year = {2017},
  abstract = {

Humans can recognize objects in a way that is invariant to scale, translation, and clutter. We use invariance theory as a conceptual basis to computationally model this phenomenon. This theory discusses the role of eccentricity in human visual processing, and is a generalization of feedforward convolutional neural networks (CNNs). Our model explains some key psychophysical observations relating to invariant perception, while maintaining important similarities with biological neural architectures. To our knowledge, this work is the first to unify explanations of all three types of invariance, all while leveraging the power and neurological grounding of CNNs.

  },
  url = {https://www.aaai.org/ocs/index.php/SSS/SSS17/paper/view/15360},
  author = {Francis Chen and Gemma Roig and Leyla Isik and X Boix and Tomaso Poggio}
}

@article{1617,
  title = {Foveation-based Mechanisms Alleviate Adversarial Examples},
  number = {044},
  year = {2016},
  month = {01/2016},
  abstract = {

We show that adversarial examples, i.e., the visually imperceptible perturbations that cause Convolutional Neural Networks (CNNs) to fail, can be alleviated with a mechanism based on foveations: applying the CNN to different image regions. To see this, we first report results on ImageNet that lead to a revision of the hypothesis that adversarial perturbations are a consequence of CNNs acting as linear classifiers: CNNs respond locally linearly to changes in the image regions containing objects recognized by the CNN, and may respond non-linearly in other regions. We then corroborate that, when the neural responses are linear, applying the foveation mechanism to the adversarial example tends to significantly reduce the effect of the perturbation. This is because, hypothetically, the CNNs for ImageNet are robust to the changes of scale and translation of the object produced by the foveation, but this property does not generalize to transformations of the perturbation. As a result, the accuracy after a foveation is almost the same as the accuracy of the CNN without the adversarial perturbation, even when the adversarial perturbation is computed with the foveation taken into account.

  },
  author = {Yan Luo and X Boix and Gemma Roig and Tomaso Poggio and Qi Zhao}
}
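
The foveation mechanism described in the preceding abstract can be illustrated with a short sketch: evaluate a pretrained ImageNet CNN on several crops of the image (stand-ins for foveal regions) and average the resulting logits. The crop positions, scales, and the PyTorch/torchvision setup below are illustrative assumptions, not the authors' exact procedure.

# Minimal sketch, assuming a PyTorch/torchvision environment; the centered
# crop fractions and logit averaging are illustrative choices only.
import torch
from torchvision import models, transforms
from PIL import Image

preprocess = transforms.Compose([
    transforms.Resize(224),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

def foveated_predict(model, image, crop_fractions=(1.0, 0.8, 0.6)):
    """Classify an image by averaging CNN logits over centered crops of
    decreasing size (a stand-in for applying the CNN to foveal regions)."""
    w, h = image.size
    logits = []
    model.eval()
    with torch.no_grad():
        for f in crop_fractions:
            cw, ch = int(w * f), int(h * f)
            left, top = (w - cw) // 2, (h - ch) // 2
            crop = image.crop((left, top, left + cw, top + ch))
            logits.append(model(preprocess(crop).unsqueeze(0)))
    return torch.stack(logits).mean(dim=0).argmax(dim=1)

# Example usage (assumes a local image file "example.jpg"):
# model = models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V1)
# print(foveated_predict(model, Image.open("example.jpg").convert("RGB")))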