@article {3959, title = {What am I searching for?}, year = {2018}, month = {July 2018}, abstract = {

Can we infer intentions and goals from a person{\textquoteright}s actions? As an example of this family of problems, we consider here whether it is possible to decipher what a person is searching for by decoding their eye movement behavior. We conducted two human psychophysics experiments, on object arrays and on natural images, in which we monitored subjects{\textquoteright} eye movements while they looked for a target object. Using as input the pattern of "error" fixations on non-target objects before the target was found, we developed a model (InferNet) whose goal was to infer what the target was. "Error" fixations share features with the sought target. InferNet uses a pre-trained 2D convolutional architecture to extract features from the error fixations and computes a 2D similarity map between each error fixation and all locations across the search image by modulating the search image via convolution across layers. InferNet consolidates the modulated response maps across layers via max pooling to track the sub-patterns most similar to the features at error fixations, and integrates these maps across all error fixations. InferNet successfully identifies the subject{\textquoteright}s goal and outperforms all competing null models, even without any object-specific training on the inference task.

}, author = {Zhang, Mengmi and Feng, Jiashi and Lim, Joo Hwee and Zhao, Qi and Kreiman, Gabriel} }
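
The abstract walks through InferNet's pipeline step by step: feature extraction at error fixations, per-layer similarity maps via convolution against the search image, max pooling across layers, and integration across fixations. The sketch below illustrates that pipeline in PyTorch. It is an illustration only, not the authors' implementation: the VGG16 backbone, the choice of tapped layers, the 1x1 feature prototype per fixation, and the cosine normalization are all assumptions filled in for the sketch.

import torch
import torch.nn.functional as F
import torchvision.models as models

# Pre-trained backbone with no task-specific training, per the abstract.
# VGG16 and the tapped layer indices are illustrative assumptions.
backbone = models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1).features.eval()
TAPPED_LAYERS = {3, 8, 15, 22}  # ReLU outputs of the first four conv blocks

@torch.no_grad()
def layer_activations(x):
    """Collect feature maps at the tapped layers for an image tensor."""
    acts = []
    for i, layer in enumerate(backbone):
        x = layer(x)
        if i in TAPPED_LAYERS:
            acts.append(x)
        if i == max(TAPPED_LAYERS):
            break
    return acts

@torch.no_grad()
def similarity_maps(search_img, fixation_patch):
    """One 2D similarity map per tapped layer: fixation-patch features
    modulate the search-image features via convolution (here reduced to a
    1x1 cosine-similarity prototype as a simplifying assumption)."""
    maps = []
    for img_act, patch_act in zip(layer_activations(search_img),
                                  layer_activations(fixation_patch)):
        proto = patch_act.mean(dim=(2, 3), keepdim=True)   # 1 x C x 1 x 1
        sim = F.conv2d(F.normalize(img_act, dim=1),
                       F.normalize(proto, dim=1))
        # Upsample every map to input resolution so layers can be combined.
        maps.append(F.interpolate(sim, size=search_img.shape[2:],
                                  mode="bilinear", align_corners=False))
    return maps

@torch.no_grad()
def infer_target_map(search_img, fixation_patches):
    """Max-pool the similarity maps across layers, then integrate (sum)
    across all error fixations; peaks in the result mark locations that
    resemble the sought target."""
    total = torch.zeros(1, 1, *search_img.shape[2:])
    for patch in fixation_patches:
        per_layer = torch.cat(similarity_maps(search_img, patch), dim=1)
        total += per_layer.max(dim=1, keepdim=True).values
    return total

# Example usage: one 224x224 search image and two 64x64 error-fixation
# crops (sizes are arbitrary stand-ins for real psychophysics data).
search = torch.rand(1, 3, 224, 224)
patches = [torch.rand(1, 3, 64, 64) for _ in range(2)]
target_map = infer_target_map(search, patches)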