@article{0966fb858f044abfaf340d8043e25916,
title = "In spoken word recognition, the future predicts the past",
abstract = "Speech is an inherently noisy and ambiguous signal. To fluently derive meaning, a listener must integrate contextual information to guide interpretations of the sensory input. Although many studies have demonstrated the influence of prior context on speech perception, the neural mechanisms supporting the integration of subsequent context remain unknown. Using MEG to record from human auditory cortex, we analyzed responses to spoken words with a varyingly ambiguous onset phoneme, the identity of which is later disambiguated at the lexical uniqueness point. Fifty participants (both male and female) were recruited across two MEG experiments. Our findings suggest that primary auditory cortex is sensitive to phonological ambiguity very early during processing, at just 50 ms after onset. Subphonemic detail is preserved in auditory cortex over long timescales and re-evoked at subsequent phoneme positions. Commitments to phonological categories occur in parallel, resolving on the shorter timescale of ~450 ms. These findings provide evidence that future input determines the perception of earlier speech sounds by maintaining sensory features until they can be integrated with top-down lexical information.",
keywords = "Auditory processing, Lexical access, MEG, Speech",
author = "Laura Gwilliams and Tal Linzen and David Poeppel and Alec Marantz",
note = "Funding Information: This work was supported by the European Research Council (grant ERC-2011-AdG 295810 BOOTPHON) and the Agence Nationale pour la Recherche (grants ANR-10-IDEX-0001-02 PSL and ANR-10-LABX-0087 IEC) to T.L.; the National Institutes of Health (Grant 2R01DC05660 to D.P.); and the NYU Abu Dhabi (NYUAD) Institute (Grant G1001 to A.M.). We thank Kyriaki Neophytou for her help with data collection and Lena Warnke for help with stimulus creation. Received Jan. 10, 2018; revised June 6, 2018; accepted July 9, 2018. Author contributions: L.G. wrote the first draft of the paper; T.L., D.P., and A.M. edited the paper. L.G. and A.M. designed research; L.G. performed research; L.G. analyzed data; L.G. wrote the paper. The authors declare no competing financial interests. Correspondence should be addressed to Laura Gwilliams, Linguistics Department, New York University, 10 Washington Place, New York, NY 10003. E-mail: laura.gwilliams@nyu.edu. Publisher Copyright: {\textcopyright} 2018 the authors.",
year = "2018",
month = aug,
day = "29",
doi = "10.1523/JNEUROSCI.0065-18.2018",
language = "English (US)",
volume = "38",
pages = "7585--7599",
journal = "Journal of Neuroscience",
issn = "0270-6474",
publisher = "Society for Neuroscience",
number = "35",
}