@article {3111, title = {Object-Oriented Deep Learning}, year = {2017}, month = {10/2017}, abstract = {

We investigate an unconventional direction of research that aims at converting neural networks, a class of distributed, connectionist, sub-symbolic models, into a symbolic level with the ultimate goal of achieving AI interpretability and safety. To that end, we propose Object-Oriented Deep Learning, a novel computational paradigm of deep learning that adopts interpretable {\textquotedblleft}objects/symbols{\textquotedblright} as its basic representational atoms instead of N-dimensional tensors (as in traditional {\textquotedblleft}feature-oriented{\textquotedblright} deep learning). For visual processing, each {\textquotedblleft}object/symbol{\textquotedblright} can explicitly package common properties of visual objects such as position, pose, scale, probability of being an object, pointers to parts, etc., providing a full spectrum of interpretable visual knowledge throughout all layers. This achieves a form of {\textquotedblleft}symbolic disentanglement{\textquotedblright}, offering one solution to the important problem of disentangled representations and invariance. The basic computations of the network are predicting high-level objects and their properties from low-level objects and binding/aggregating relevant objects together. These computations operate at a more fundamental level than convolution, capturing convolution as a special case while being significantly more general. All operations are executed in an input-driven fashion, so sparsity and dynamic computation per sample are naturally supported, complementing recent popular ideas of dynamic networks and potentially enabling new types of hardware acceleration. We show experimentally on CIFAR-10 that the paradigm supports flexible visual processing, rivaling the performance of ConvNets without using any convolution. Furthermore, it can generalize to novel rotations of images on which it was not trained.

}, author = {Qianli Liao and Tomaso Poggio} }
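The abstract above describes each "object/symbol" as a record that packages position, pose, scale, objectness probability, and pointers to parts, together with a binding/aggregation computation that predicts high-level objects from low-level ones. As a rough illustration only, below is a minimal Python sketch of such a record and one possible aggregation step. The paper provides no code, so every field name and the simple averaging rule here are assumptions made for clarity, not the authors' method.

    # Hypothetical sketch of an "object/symbol" atom and a binding step.
    # All names and the pooling rule are illustrative assumptions; the
    # paper does not publish an implementation.
    from dataclasses import dataclass, field
    from typing import List, Optional


    @dataclass
    class ObjectSymbol:
        """One interpretable representational atom at some layer."""
        position: tuple            # (x, y) location in image coordinates
        pose: float                # e.g. in-plane rotation, in radians
        scale: float               # relative size of the object
        objectness: float          # probability of being a real object
        parts: List["ObjectSymbol"] = field(default_factory=list)  # pointers to lower-level objects


    def aggregate(parts: List[ObjectSymbol]) -> Optional[ObjectSymbol]:
        """Bind a set of low-level objects into one higher-level object
        by pooling their properties (a stand-in for the paper's
        binding/aggregation computation)."""
        if not parts:
            return None
        n = len(parts)
        return ObjectSymbol(
            position=(sum(p.position[0] for p in parts) / n,
                      sum(p.position[1] for p in parts) / n),
            pose=sum(p.pose for p in parts) / n,
            scale=sum(p.scale for p in parts) / n,
            objectness=sum(p.objectness for p in parts) / n,
            parts=parts,
        )

Because each atom carries its own pose and scale, a higher-level object built this way inherits interpretable geometric properties from its parts, which is one way to read the abstract's claim of "symbolic disentanglement" throughout all layers.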