@inproceedings{065d634d74644f2aa809ccade8c39dd8,
title = "VisualSem: A High-quality Knowledge Graph for Vision & Language",
abstract = "An exciting frontier in natural language understanding (NLU) and generation (NLG) calls for (vision-and-) language models that can efficiently access external structured knowledge repositories. However, many existing knowledge bases only cover limited domains, or suffer from noisy data, and most of all are typically hard to integrate into neural language pipelines. To fill this gap, we release VisualSem: a high-quality knowledge graph (KG) which includes nodes with multilingual glosses, multiple illustrative images, and visually relevant relations. We also release a neural multi-modal retrieval model that can use images or sentences as inputs and retrieves entities in the KG. This multi-modal retrieval model can be integrated into any (neural network) model pipeline. We encourage the research community to use VisualSem for data augmentation and/or as a source of grounding, among other possible uses. VisualSem as well as the multi-modal retrieval models are publicly available and can be downloaded in this URL: https://github.com/iacercalixto/visualsem.",
author = "Houda Alberts and Ningyuan Huang and Deshpande, {Yash R.} and Yibo Liu and Kyunghyun Cho and Clara Vania and Iacer Calixto",
note = "Funding Information: IC has received funding from the European Union{\textquoteright}s Horizon 2020 research and innovation programme under the Marie Sk{\l}odowska-Curie grant agreement No 838188. KC is partly supported by Sam-sung Advanced Institute of Technology (Next Generation Deep Learning: from pattern recognition to AI) and Samsung Research (Improving Deep Learning using Latent Structure). KC also thanks Naver, eBay, NVIDIA, and NSF Award 1922658 for support. CV{\textquoteright}s work on this project at New York University was financially supported by Eric and Wendy Schmidt (made by recommendation of the Schmidt Futures program) and Samsung Research (under the project Improving Deep Learning using Latent Structure) and benefitted from in-kind support by the NYU High-Performance Computing Center. This material is based upon work supported by the National Science Foundation under Grant No. 1922658. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the National Science Foundation. Publisher Copyright: {\textcopyright} 2021 Association for Computational Linguistics.; 1st Workshop on Multilingual Representation Learning, MRL 2021 ; Conference date: 11-11-2021",
year = "2021",
language = "English (US)",
series = "MRL 2021 - 1st Workshop on Multilingual Representation Learning, Proceedings of the Conference",
publisher = "Association for Computational Linguistics (ACL)",
pages = "138--152",
editor = "Duygu Ataman and Alexandra Birch and Alexis Conneau and Orhan Firat and Sebastian Ruder and Sahin, {Gozde Gul}",
booktitle = "MRL 2021 - 1st Workshop on Multilingual Representation Learning, Proceedings of the Conference",
}