@article {3162, title = {Invariant action recognition dataset}, year = {2017}, month = {11/2017}, abstract = {

To study the effect of changes in view and actor on action recognition, we filmed a dataset of five actors performing five different actions (drink, eat, jump, run, and walk) on a treadmill from five different views (0, 45, 90, 135, and 180 degrees from the front of the actor/treadmill; the treadmill rather than the camera was rotated in place to acquire the different viewpoints). The dataset was filmed against a fixed, constant background. To avoid low-level object/action confounds (e.g. the action {\textquotedblleft}drink{\textquotedblright} being classified as the only videos with a water bottle in the scene) and to guarantee that the main sources of variation in visual appearance are the action, actor, and viewpoint, the actors held the same objects (an apple and a water bottle) in every video, regardless of the action they performed. This controlled design allows us to test hypotheses about the computational mechanisms underlying invariant recognition in the human visual system without having to settle for a synthetic dataset.

More information and the dataset files are available at https://doi.org/10.7910/DVN/DMT0PG

}, url = {https://doi.org/10.7910/DVN/DMT0PG}, author = {Andrea Tacchetti and Leyla Isik and Tomaso Poggio} }