@inproceedings{6562a1df1e9f498e8e5c7d4f9a58c16e,
title = "Robustness and Adversarial Examples in Natural Language Processing",
abstract = "Recent studies show that many NLP systems are sensitive and vulnerable to a small perturbation of inputs and do not generalize well across different datasets. This lack of robustness derails the use of NLP systems in real-world applications. This tutorial aims at bringing awareness of practical concerns about NLP robustness. It targets NLP researchers and practitioners who are interested in building reliable NLP systems. In particular, we will review recent studies on analyzing the weakness of NLP systems when facing adversarial inputs and data with a distribution shift. We will provide the audience with a holistic view of 1) how to use adversarial examples to examine the weakness of NLP models and facilitate debugging; 2) how to enhance the robustness of existing NLP models and defense against adversarial inputs; and 3) how the consideration of robustness affects the real-world NLP applications used in our daily lives. We will conclude the tutorial by outlining future research directions in this area.",
author = "Chang, {Kai Wei} and He He and Robin Jia and Sameer Singh",
note = "Publisher Copyright: {\textcopyright} 2021 Association for Computational Linguistics.; 2021 Conference on Empirical Methods in Natural Language Processing, EMNLP 2021 ; Conference date: 07-11-2021 Through 11-11-2021",
year = "2021",
language = "English (US)",
series = "EMNLP 2021 - 2021 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts",
publisher = "Association for Computational Linguistics (ACL)",
pages = "22--26",
booktitle = "EMNLP 2021 - 2021 Conference on Empirical Methods in Natural Language Processing",
}