@inproceedings{6e7cf23296784e0d8437c0a2899b4c6d,
title = "{longhorns} at {DADC} 2022: How many linguists does it take to fool a {Question Answering} model? A systematic approach to adversarial attacks",
abstract = "Developing methods to adversarially challenge NLP systems is a promising avenue for improving both model performance and interpretability. Here, we describe the approach of the team “longhorns” on Task 1 of the First Workshop on Dynamic Adversarial Data Collection (DADC), which asked teams to manually fool a model on an Extractive Question Answering task. Our team finished first, with a model error rate of 62%. We advocate for a systematic, linguistically informed approach to formulating adversarial questions, and we describe the results of our pilot experiments, as well as our official submission.",
author = "Venelin Kovatchev and Trina Chatterjee and Venkata S. Govindarajan and Jifan Chen and Eunsol Choi and Gabriella Chronis and Anubrata Das and Katrin Erk and Matthew Lease and Junyi Jessy Li and Yating Wu and Kyle Mahowald",
note = "1st Workshop on Dynamic Adversarial Data Collection (DADC 2022); Conference date: 14-07-2022",
year = "2022",
language = "English (US)",
publisher = "Association for Computational Linguistics (ACL)",
pages = "41--52",
editor = "Max Bartolo and Hannah Rose Kirk and Pedro Rodriguez and Katerina Margatina and Tristan Thrush and Robin Jia and Pontus Stenetorp and Adina Williams and Douwe Kiela",
booktitle = "Proceedings of the First Workshop on Dynamic Adversarial Data Collection (DADC 2022)",
}