@inproceedings{5023cb9b37e84bd28b0ca4bb90798cc4,
title = "Sequential attention: A context-aware alignment function for machine reading",
abstract = "In this paper we propose a neural network model with a novel Sequential Attention layer that extends soft attention by assigning weights to words in an input sequence in a way that takes into account not just how well each word matches a query, but also how well surrounding words match. We evaluate this approach on the task of reading comprehension (on the Who did What and CNN datasets) and show that it dramatically improves a strong baseline, the Stanford Reader, and is competitive with the state of the art.",
author = "Brarda, Sebastian and Yeres, Philip and Bowman, {Samuel R.}",
note = "Funding Information: This paper was the result of a term project for the NYU course DS-GA 3001, Natural Language Understanding with Distributed Representations. Bowman acknowledges support from a Google Faculty Research Award and gifts from Tencent Holdings and the NVIDIA Corporation. Publisher Copyright: {\textcopyright} 2017 Association for Computational Linguistics. 2nd Workshop on Representation Learning for NLP, Rep4NLP 2017, at the 55th Annual Meeting of the Association for Computational Linguistics, ACL 2017; Conference date: 03-08-2017",
year = "2017",
language = "English (US)",
series = "Proceedings of the 2nd Workshop on Representation Learning for NLP, Rep4NLP 2017 at the 55th Annual Meeting of the Association for Computational Linguistics, ACL 2017",
publisher = "Association for Computational Linguistics (ACL)",
pages = "75--80",
editor = "Phil Blunsom and Antoine Bordes and Kyunghyun Cho and Shay Cohen and Chris Dyer and Edward Grefenstette and Karl Moritz Hermann and Laura Rimell and Jason Weston and Scott Yih",
booktitle = "Proceedings of the 2nd Workshop on Representation Learning for NLP, Rep4NLP 2017 at the 55th Annual Meeting of the Association for Computational Linguistics, ACL 2017",
}
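
For readers implementing the idea described in the abstract, a minimal sketch follows (PyTorch). It assumes the mechanism can be realized as bilinear match features smoothed by a bidirectional LSTM before the softmax; the class and parameter names are illustrative, not taken from the paper or any official release.

import torch
import torch.nn as nn

class SequentialAttention(nn.Module):
    """Sketch of a context-aware attention layer: per-token match
    features are passed through a bi-LSTM, so each attention weight
    also reflects how well neighboring tokens match the query."""

    def __init__(self, hidden_dim):
        super().__init__()
        # Bilinear match between passage states and the query vector.
        self.bilinear = nn.Linear(hidden_dim, hidden_dim, bias=False)
        # RNN over per-token match features injects neighbor context.
        self.context_rnn = nn.LSTM(hidden_dim, hidden_dim,
                                   bidirectional=True, batch_first=True)
        self.score = nn.Linear(2 * hidden_dim, 1)

    def forward(self, passage, query):
        # passage: (batch, seq_len, hidden_dim); query: (batch, hidden_dim)
        match = passage * self.bilinear(query).unsqueeze(1)   # per-token match features
        context, _ = self.context_rnn(match)                  # mix in neighbors' match
        weights = torch.softmax(self.score(context).squeeze(-1), dim=1)
        # Attention-weighted summary of the passage.
        return torch.bmm(weights.unsqueeze(1), passage).squeeze(1)

# Usage example:
attn = SequentialAttention(hidden_dim=128)
passage = torch.randn(2, 50, 128)   # batch of 2 passages, 50 tokens each
query = torch.randn(2, 128)         # one query vector per passage
summary = attn(passage, query)      # (2, 128) attended passage representation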