@article{2617,
  title    = {Statistics of natural reverberation enable perceptual separation of sound and space},
  journal  = {Proceedings of the National Academy of Sciences},
  volume   = {113},
  year     = {2016},
  month    = sep,
  pages    = {E7856--E7865},
  abstract = {In everyday listening, sound reaches our ears directly from a source as well as indirectly via reflections known as reverberation. Reverberation profoundly distorts the sound from a source, yet humans can both identify sound sources and distinguish environments from the resulting sound, via mechanisms that remain unclear. The core computational challenge is that the acoustic signatures of the source and environment are combined in a single signal received by the ear. Here we ask whether our recognition of sound sources and spaces reflects an ability to separate their effects and whether any such separation is enabled by statistical regularities of real-world reverberation. To first determine whether such statistical regularities exist, we measured impulse responses (IRs) of 271 spaces sampled from the distribution encountered by humans during daily life. The sampled spaces were diverse, but their IRs were tightly constrained, exhibiting exponential decay at frequency-dependent rates: Mid frequencies reverberated longest whereas higher and lower frequencies decayed more rapidly, presumably due to absorptive properties of materials and air. To test whether humans leverage these regularities, we manipulated IR decay characteristics in simulated reverberant audio. Listeners could discriminate sound sources and environments from these signals, but their abilities degraded when reverberation characteristics deviated from those of real-world environments. Subjectively, atypical IRs were mistaken for sound sources. The results suggest the brain separates sound into contributions from the source and the environment, constrained by a prior on natural reverberation. This separation process may contribute to robust recognition while providing information about spaces around us.},
  keywords = {auditory scene analysis, environmental acoustics, natural scene statistics, psychoacoustics, psychophysics},
  issn     = {0027-8424},
  doi      = {10.1073/pnas.1612524113},
  url      = {http://www.pnas.org/lookup/doi/10.1073/pnas.1612524113},
  author   = {James Traer and Josh H. McDermott}
}
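The abstract's central acoustic observation is that measured IRs decay exponentially at rates that depend on frequency. The Python sketch below is not the authors' analysis pipeline; it illustrates one standard way to quantify such decay: band-pass an IR into frequency bands, compute the Schroeder backward energy integral, and fit a per-band decay time (RT60). The band edges, Butterworth filtering, and the -5 to -35 dB fitting range are illustrative assumptions.

# Minimal sketch (illustrative, not from the paper): frequency-dependent
# reverberation decay estimated from an impulse response (IR).
import numpy as np
from scipy.signal import butter, sosfiltfilt

def band_rt60(ir, fs, f_lo, f_hi):
    """RT60 estimate (seconds) in one band via Schroeder backward integration."""
    sos = butter(4, [f_lo, f_hi], btype="bandpass", fs=fs, output="sos")
    x = sosfiltfilt(sos, ir)
    energy = np.cumsum(x[::-1] ** 2)[::-1]            # Schroeder backward integral
    edc_db = 10 * np.log10(energy / energy[0] + 1e-12)  # energy decay curve in dB
    # Fit a line to the -5 dB to -35 dB portion of the decay curve
    idx = np.where((edc_db <= -5) & (edc_db >= -35))[0]
    t = idx / fs
    slope, _ = np.polyfit(t, edc_db[idx], 1)          # dB per second (negative)
    return -60.0 / slope                              # time to decay by 60 dB

if __name__ == "__main__":
    # Toy IR with a single exponential decay; a real measured IR would show
    # different decay rates in different bands, as reported in the abstract.
    fs = 44100
    t = np.arange(int(1.5 * fs)) / fs
    ir = np.random.randn(t.size) * np.exp(-t / 0.15)
    for f_lo, f_hi in [(125, 250), (500, 1000), (2000, 4000), (8000, 16000)]:
        print(f"{f_lo}-{f_hi} Hz: RT60 ~ {band_rt60(ir, fs, f_lo, f_hi):.2f} s")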