@inproceedings{ee628d2521b348c5a26b78ad2a2413a0,
  author    = {Behzadan, Vahid and Munir, Arslan},
  title     = {Vulnerability of Deep Reinforcement Learning to Policy Induction Attacks},
  booktitle = {Machine Learning and Data Mining in Pattern Recognition - 13th International Conference, MLDM 2017, Proceedings},
  editor    = {Perner, Petra},
  series    = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  volume    = {10358},
  pages     = {262--275},
  publisher = {Springer Verlag},
  year      = {2017},
  doi       = {10.1007/978-3-319-62416-7_19},
  isbn      = {9783319624150},
  language  = {English (US)},
  keywords  = {Adversarial examples, Deep Q-Learning, Manipulation, Policy induction, Reinforcement learning, Vulnerability},
  abstract  = {Deep learning classifiers are known to be inherently vulnerable to manipulation by intentionally perturbed inputs, named adversarial examples. In this work, we establish that reinforcement learning techniques based on Deep Q-Networks (DQNs) are also vulnerable to adversarial input perturbations, and verify the transferability of adversarial examples across different DQN models. Furthermore, we present a novel class of attacks based on this vulnerability that enable policy manipulation and induction in the learning process of DQNs. We propose an attack mechanism that exploits the transferability of adversarial examples to implement policy induction attacks on DQNs, and demonstrate its efficacy and impact through experimental study of a game-learning scenario.},
  note      = {Publisher Copyright: {\textcopyright} Springer International Publishing AG 2017.; 13th International Conference on Machine Learning and Data Mining in Pattern Recognition, MLDM 2017 ; Conference date: 15-07-2017 Through 20-07-2017},
}