@inproceedings{73d6050cf4694ac183a78a8ccfff023f,
title = "{Flowgrad}: Using Motion for Visual Sound Source Localization",
abstract = "Most recent work in visual sound source localization relies on semantic audio-visual representations learned in a self-supervised manner and, by design, excludes temporal information present in videos. While this approach proves effective on widely used benchmark datasets, it falls short in challenging scenarios such as urban traffic. This work introduces temporal context into state-of-the-art methods for sound source localization in urban scenes, using optical flow to encode motion information. An analysis of the strengths and weaknesses of our methods helps us better understand the problem of visual sound source localization and sheds light on open challenges for audio-visual scene understanding. The code and pretrained models are publicly available at https://github.com/rrrajjjj/flowgrad.",
keywords = "Sound source localization, audio-visual urban scene understanding, explainability",
author = "Rajsuryan Singh and Pablo Zinemanas and Xavier Serra and {Juan Pablo} Bello and Magdalena Fuentes",
note = "Publisher Copyright: {\textcopyright} 2023 IEEE; 48th IEEE International Conference on Acoustics, Speech and Signal Processing, ICASSP 2023; Conference date: 04-06-2023 through 10-06-2023",
year = "2023",
doi = "10.1109/ICASSP49357.2023.10094965",
language = "English (US)",
series = "ICASSP, IEEE International Conference on Acoustics, Speech and Signal Processing - Proceedings",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
booktitle = "ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing, Proceedings",
}