<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Psychol.</journal-id>
<journal-title>Frontiers in Psychology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Psychol.</abbrev-journal-title>
<issn pub-type="epub">1664-1078</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpsyg.2025.1538511</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Psychology</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Auditory working memory mechanisms mediating the relationship between musicianship and auditory stream segregation</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Liu</surname> <given-names>Martha</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2882965/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Arseneau-Bruneau</surname> <given-names>Isabelle</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2949468/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Farr&#x00E9;s Franch</surname> <given-names>Marcel</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/425802/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Latorre</surname> <given-names>Marie-Elise</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2912148/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Samuels</surname> <given-names>Joshua</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Issa</surname> <given-names>Emily</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Payumo</surname> <given-names>Alexandre</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Rahman</surname> <given-names>Nayemur</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Loureiro</surname> <given-names>Na&#x00ED;ma</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Leung</surname> <given-names>Tsz Chun Matthew</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Nave</surname> <given-names>Karli M.</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2948385/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>von Handorf</surname> <given-names>Kristi M.</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Hoddinott</surname> <given-names>Joshua D.</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Coffey</surname> <given-names>Emily B. J.</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/105939/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Grahn</surname> <given-names>Jessica</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/10952/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Zatorre</surname> <given-names>Robert J.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/10814/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Montreal Neurological Institute, McGill University</institution>, <addr-line>Montreal, QC</addr-line>, <country>Canada</country></aff>
<aff id="aff2"><sup>2</sup><institution>Centre for Research in Brain, Language and Music</institution>, <addr-line>Montreal, QC</addr-line>, <country>Canada</country></aff>
<aff id="aff3"><sup>3</sup><institution>Department of Psychology and Centre for Brain and Mind, Western University</institution>, <addr-line>London, ON</addr-line>, <country>Canada</country></aff>
<aff id="aff4"><sup>4</sup><institution>Department of Psychology, Concordia University</institution>, <addr-line>Montreal, QC</addr-line>, <country>Canada</country></aff>
<author-notes>
<fn fn-type="edited-by" id="fn0001">
<p>Edited by: Hirohito M. Kondo, Chukyo University, Japan</p>
</fn>
<fn fn-type="edited-by" id="fn0002">
<p>Reviewed by: Paulo Est&#x00EA;v&#x00E3;o Andrade, Goldsmiths University of London, United Kingdom</p>
<p>Claudia Metzler-Baddeley, Cardiff University, United Kingdom</p>
<p>Fionnuala Rogers, Cardiff University, United Kingdom, in collaboration with reviewer CM-B</p>
<p>B&#x00FC;lent &#x015E;erbet&#x00E7;io&#x011F;lu, Istanbul Medipol University, T&#x00FC;rkiye</p>
</fn>
<corresp id="c001">&#x002A;Correspondence: Martha Liu, <email>marthaliu0203@gmail.com</email>; Robert J. Zatorre, <email>robert.zatorre@mcgill.ca</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>28</day>
<month>03</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>16</volume>
<elocation-id>1538511</elocation-id>
<history>
<date date-type="received">
<day>02</day>
<month>12</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>25</day>
<month>02</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2025 Liu, Arseneau-Bruneau, Farr&#x00E9;s Franch, Latorre, Samuels, Issa, Payumo, Rahman, Loureiro, Leung, Nave, von Handorf, Hoddinott, Coffey, Grahn and Zatorre.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Liu, Arseneau-Bruneau, Farr&#x00E9;s Franch, Latorre, Samuels, Issa, Payumo, Rahman, Loureiro, Leung, Nave, von Handorf, Hoddinott, Coffey, Grahn and Zatorre</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>This study investigates the interactions between musicianship and two auditory cognitive processes: auditory working memory (AWM) and stream segregation. The primary hypothesis is that AWM could mediate a relationship between musical training and enhanced stream segregation capabilities. Two groups of listeners were tested: the first aimed to establish the relationship between the three variables, and the second aimed to replicate the effect in an independent sample. Music experience history and behavioral data were collected from a total of 145 healthy young adults with normal binaural hearing. The AWM task involved the manipulation of tonal patterns in working memory, while the Music-in-Noise Task (MINT) measured stream segregation abilities in a tonal context. The MINT expands measurements beyond traditional Speech-in-Noise assessments by capturing auditory subskills (rhythm, visual, spatial attention, prediction) relevant to stream segregation. Our results showed that musical training is associated with enhanced AWM and MINT performance and that this effect is replicable across independent samples. Moreover, we found in both samples that the enhancement of stream segregation was largely mediated by AWM capacity. The results suggest that musical training and/or aptitude enhances stream segregation by way of improved AWM capacity.</p>
</abstract>
<kwd-group>
<kwd>auditory stream segregation</kwd>
<kwd>auditory working memory</kwd>
<kwd>hearing-in-noise</kwd>
<kwd>musical training</kwd>
<kwd>music perception</kwd>
</kwd-group>
<counts>
<fig-count count="5"/>
<table-count count="6"/>
<equation-count count="0"/>
<ref-count count="82"/>
<page-count count="14"/>
<word-count count="10899"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Auditory Cognitive Neuroscience</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<title>Introduction</title>
<p>Navigating the symphony of sounds that simultaneously converge upon our ears poses a multifaceted challenge to the human auditory system&#x2019;s ability to distinguish distinct perceptual objects (<xref ref-type="bibr" rid="ref10">Bregman, 1990</xref>), thus playing a pivotal role in organizing our auditory perception (<xref ref-type="bibr" rid="ref67">Shamma and Micheyl, 2010</xref>). This cognitive function is influenced by both stimulus-driven grouping strategies (<xref ref-type="bibr" rid="ref56">Noorden, 1975</xref>; <xref ref-type="bibr" rid="ref20">Deroche et al., 2017</xref>; <xref ref-type="bibr" rid="ref11">Bregman and Pinker, 1978</xref>) and cognitive top-down factors (<xref ref-type="bibr" rid="ref3">Anderson et al., 2013</xref>; <xref ref-type="bibr" rid="ref18">Davis and Johnsrude, 2007</xref>; <xref ref-type="bibr" rid="ref74">Thompson et al., 2011</xref>). At the cognitive level, stream segregation involves various factors such as the listener&#x2019;s attention and attentional load (<xref ref-type="bibr" rid="ref33">Heinrich et al., 2008</xref>; <xref ref-type="bibr" rid="ref74">Thompson et al., 2011</xref>), prior knowledge (<xref ref-type="bibr" rid="ref18">Davis and Johnsrude, 2007</xref>), inhibitory control (<xref ref-type="bibr" rid="ref47">Lewis et al., 2021</xref>; <xref ref-type="bibr" rid="ref71">Stenb&#x00E4;ck et al., 2022</xref>), and schematic expectations (<xref ref-type="bibr" rid="ref7">Bey and McAdams, 2002</xref>). 
In particular, auditory working memory (AWM), the active mental workspace that allows the temporary storage and manipulation of short-term acoustic information (<xref ref-type="bibr" rid="ref9001">Baddeley, 1992</xref>), has been suggested to play a crucial role in auditory stream segregation (<xref ref-type="bibr" rid="ref7">Bey and McAdams, 2002</xref>; <xref ref-type="bibr" rid="ref33">Heinrich et al., 2008</xref>; <xref ref-type="bibr" rid="ref17">Dalton et al., 2009</xref>; <xref ref-type="bibr" rid="ref25">Escobar et al., 2020</xref>), accounting for individual differences in this capacity (<xref ref-type="bibr" rid="ref29">Gordon-Salant and Cole, 2016</xref>; <xref ref-type="bibr" rid="ref58">Parbery-Clark et al., 2009a</xref>).</p>
<sec id="sec2">
<title>The influence of musicianship on AWM and SIN perception</title>
<p>Musicians have emerged as a distinctive population of interest due to their constant exposure and attunement to complex auditory patterns (<xref ref-type="bibr" rid="ref35">Herholz and Zatorre, 2012</xref>; <xref ref-type="bibr" rid="ref12">Brown et al., 2015</xref>). Musical activities such as practice and performance are proposed to lead to improved stream segregation abilities (<xref ref-type="bibr" rid="ref72">Swaminathan et al., 2015</xref>; for review, see <xref ref-type="bibr" rid="ref16">Coffey et al., 2017</xref>) and enhanced working memory, especially for tonal stimuli (for meta-analysis, see <xref ref-type="bibr" rid="ref73">Talamini et al., 2017</xref>; for review, see <xref ref-type="bibr" rid="ref79">Yurgil et al., 2020</xref>).</p>
<p>Traditionally, the relationship between musicianship, AWM, and stream segregation has been examined using a variety of Speech-in-Noise (SIN) tests (<xref ref-type="bibr" rid="ref55">Nilsson et al., 1994</xref>; <xref ref-type="bibr" rid="ref42">Killion et al., 2004</xref>). Many, though not all, studies have reported that musical training is correlated with a better perception of speech-in-noise (for review, see <xref ref-type="bibr" rid="ref16">Coffey et al., 2017</xref>). <xref ref-type="bibr" rid="ref59">Parbery-Clark et al. (2009b)</xref> specifically reported a strong relationship between AWM and SIN perceptual abilities across age groups in musicians, suggesting that the AWM enhancement of musicians mediates their better performance in SIN.</p>
<p>Several studies suggest the possibility that AWM may be related to SIN performance in musicians. Research using Mandarin nonsense sentence stimuli has shown a mediating role of AWM in ameliorating SIN perception loss in older, but not younger musicians, as demonstrated through path analysis (<xref ref-type="bibr" rid="ref83">Zhang et al., 2021</xref>). Other research reported musicians&#x2019; SIN advantage and correlation between SIN scores and working memory, although the associations are limited to cases where the noise induces linguistic interference (<xref ref-type="bibr" rid="ref78">Yoo and Bidelman, 2019</xref>). <xref ref-type="bibr" rid="ref25">Escobar et al. (2020)</xref> reported that after equating for AWM capacity, there was no difference between musicians and non-musicians; however AWM was correlated with performance on several SIN tests.</p>
<p>The varied findings in SIN tests could be related to variations in task design, criteria for musicianship, and different scoring methods (for further explanation, see <xref ref-type="bibr" rid="ref13">Coffey et al., 2019</xref>). More critically, these SIN tasks fall short of providing the granularity required to assess individual perceptual components and top-down cues involved in stream segregation, which could potentially be affected by training or other interventions. Furthermore, SIN assessments in prior studies exclusively focused on sentence or word detection, which limits the generalizability of the findings on hearing-in-noise to the speech modality alone.</p>
</sec>
<sec id="sec3">
<title>Music-in-Noise Task</title>
<p>The Music-in-Noise Task (MINT) is a stream segregation paradigm designed to eliminate linguistic influences, expand measures beyond speech perception, and assess different top-down processes (<xref ref-type="bibr" rid="ref13">Coffey et al., 2019</xref>). By using a melodic target embedded within a mix of musical sounds as informational masking, MINT enables the systematic disentangling of critical auditory sub-skills involved in effective stream segregation (<xref ref-type="bibr" rid="ref69">Slater and Kraus, 2016</xref>; <xref ref-type="bibr" rid="ref13">Coffey et al., 2019</xref>), including rhythmic, visual, spatial attentional, and predictive cues. Paralleling the findings in SIN research, <xref ref-type="bibr" rid="ref13">Coffey et al. (2019)</xref> reported significant correlations between cumulative musical practice hours and music-in-noise perception, particularly in rhythm, prediction, and visual conditions. The study also showed a significant relationship between AWM and overall MINT performance. However, AWM capability in that study was only accounted for as a covariate in analyzing musical training&#x2019;s impact on MINT sub-conditions, along with other factors such as pitch discrimination and multilingualism. Consequently, there remains a gap in the literature regarding the interaction between musical training, AWM, and music-in-noise perception.</p>
</sec>
<sec id="sec4">
<title>Specific aims and hypothesis</title>
<p>The goal of the present study was (1) to determine if the purported musician advantage in auditory stream segregation could be consistently observed, and (2) specifically to test the hypothesis that such an effect is mediated by enhanced AWM. We implemented a test-replication research design where the same study was conducted in two phases with independent samples. This approach allows for testing the robustness of the findings across cohorts of different distributions of musicianship. In Experiment 1 (Initial Phase) the phenomenon of interest was identified and analyzed. Experiment 2 (Replication Phase) tested whether the initial findings could be replicated in a more heterogeneous sample, thus ensuring that the observed effects are robust and not solely related to the specific sample used in the first phase.</p>
<p>To test the effects of music training on both MINT and AWM, we carried out correlational analyses using cumulative practice hours as the independent variable; for additional verification and to account for possible nonlinear effects, we also carried out categorical comparisons of musicians vs. non-musicians. We hypothesized a positive relationship between musical training and AWM and MINT task performance. Finally, we aimed to test the hypothesis that musical training fosters improvements in MINT through the enhancement of AWM capabilities, as suggested but not fully confirmed by the literature, positioning AWM as a mediating factor in this relationship. We therefore used statistical mediation analysis to understand the underlying process by which musical training influences music-in-noise perception, delineating direct and indirect effects through the mediator (AWM).</p>
</sec>
</sec>
<sec id="sec5">
<title>Experiment 1</title>
<sec id="sec6">
<title>Methods and materials</title>
<sec id="sec7">
<title>Participants</title>
<p>In the initial phase, we recruited 82 healthy young adults with either minimal or extensive piano experience. Participants were recruited from various advertisement sources (social media, flyers, etc.), while some expert musicians were specifically recruited through a snowball sampling method. As part of a broader study not detailed here, participants completed a comprehensive battery of tasks and were compensated with cash for their overall time. All participants had completed at least 1 year of university-level education, and their demographic information is provided in <xref ref-type="table" rid="tab1">Table 1</xref>. To conduct group comparisons on the effects of musical training, we defined subjects with &#x003E;10 cumulative years of music training and&#x202F;&#x003E;&#x202F;4,000&#x202F;h of lifetime practice as Musicians (<italic>N</italic>&#x202F;=&#x202F;42), and subjects with &#x003C;2&#x202F;years of musical activity as Non-Musicians (<italic>N</italic>&#x202F;=&#x202F;20) (<xref ref-type="table" rid="tab1">Table 1</xref>).</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Experiment 1.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Variable</th>
<th align="center" valign="top">Total (<italic>N</italic>&#x202F;=&#x202F;82)</th>
<th align="center" valign="top">Musicians (<italic>N</italic>&#x202F;=&#x202F;42)</th>
<th align="center" valign="top">Non-musicians (<italic>N</italic>&#x202F;=&#x202F;20)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Age (mean&#x202F;&#x00B1;&#x202F;<italic>SD</italic>)</td>
<td align="center" valign="top">25.5&#x202F;&#x00B1;&#x202F;6.8</td>
<td align="center" valign="top">24.3&#x202F;&#x00B1;&#x202F;5.6</td>
<td align="center" valign="top">29.2&#x202F;&#x00B1;&#x202F;7.8</td>
</tr>
<tr>
<td align="left" valign="top">Age range</td>
<td align="center" valign="top">18&#x2013;45</td>
<td align="center" valign="top">18&#x2013;44</td>
<td align="center" valign="top">21&#x2013;45</td>
</tr>
<tr>
<td align="left" valign="top">Sex</td>
<td align="center" valign="top">30 males, 51 females, 1 non-binary</td>
<td align="center" valign="top">16 males, 25 females, 1 non-binary</td>
<td align="center" valign="top">7 males, 13 females</td>
</tr>
<tr>
<td align="left" valign="top">Cumulative practice hours (mean&#x202F;&#x00B1;&#x202F;<italic>SD</italic>)</td>
<td align="center" valign="top">5,300&#x202F;&#x00B1;&#x202F;5,900</td>
<td align="center" valign="top">9,400&#x202F;&#x00B1;&#x202F;5,700</td>
<td align="center" valign="top">90&#x202F;&#x00B1;&#x202F;200</td>
</tr>
<tr>
<td align="left" valign="top">Cumulative practice range</td>
<td align="center" valign="top">0&#x2013;30,000</td>
<td align="center" valign="top">4,100&#x2013;30,000</td>
<td align="center" valign="top">0&#x2013;620</td>
</tr>
<tr>
<td align="left" valign="top">Age of onset (mean&#x202F;&#x00B1;&#x202F;<italic>SD</italic>)</td>
<td align="center" valign="top">N/A</td>
<td align="center" valign="top">5.0&#x202F;&#x00B1;&#x202F;1.3</td>
<td align="center" valign="top">N/A</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>Descriptive statistics of demographic and musical training variables.</p>
</table-wrap-foot>
</table-wrap>
<p>Subjects provided informed consent and were compensated for their participation and time. All experimental procedures were approved by the McGill University Faculty of Medicine Research Ethics Board. All participants were screened to have normal or corrected-to-normal vision and reported no history of neurological disorders. Normal binaural hearing was confirmed by an audiometric test which measured pure-tone thresholds from 250 to 8,000&#x202F;Hz (less than 25&#x202F;dB HL). Participants with binaural hearing thresholds above 25&#x202F;dB HL did not proceed with the study as deficiencies in the frequency range may influence their task performance. Out of the 82 participants from Experiment 1 who completed all parts of the study, 4 were excluded from the MINT analysis due to their inability to process basic musical content (with 2 or more out of 6 incorrect responses for the MINT task Control condition, see description below).</p>
</sec>
<sec id="sec8">
<title>Procedure</title>
<p>Prior to the testing session, participants confirmed eligibility and completed the Montreal Music History Questionnaire (MMHQ) (<xref ref-type="bibr" rid="ref15">Coffey et al., 2011</xref>). The MMHQ provides the subject&#x2019;s self-reported information regarding overall musical experience (instruments played, total cumulative practice hours), language proficiency, basic demographics, etc. The tasks were administered in the context of a larger test battery that will not be reported here. Each testing session began with an audiometry hearing test, followed by a series of behavioral tasks, including the AWM task (<xref ref-type="bibr" rid="ref2">Albouy et al., 2017</xref>) and the MINT task (<xref ref-type="bibr" rid="ref13">Coffey et al., 2019</xref>); see the following section for descriptions. The visual component of each task was presented on a computer screen and sounds were presented binaurally through headphones (ATH-M50x, Audio-Technica). A comfortable sound level set at 73&#x202F;dB was determined during pilot testing and kept constant for all subjects and both tasks.</p>
</sec>
<sec id="sec9">
<title>Measures and behavioral tasks</title>
<list list-type="simple">
<list-item>
<p>(1) To test for individual AWM abilities and eliminate linguistic influences, we implemented an AWM task that measures individuals&#x2019; auditory retention and manipulation capabilities with sets of tonal stimuli (<xref ref-type="bibr" rid="ref2">Albouy et al., 2017</xref>). This AWM task uses a discrimination design that involves the detection of a local pitch change within two tonal patterns differing in temporal order, described as the &#x201C;Manipulation Task&#x201D; in <xref ref-type="bibr" rid="ref2">Albouy et al. (2017)</xref>. On each trial, participants first listened to three sequentially presented 250&#x202F;ms tones, which were followed after a 2000&#x202F;ms silent retention interval by a probe consisting of another set of three tones (<xref ref-type="fig" rid="fig1">Figure 1</xref>). The task was to determine whether the sequence of the second set of three tones was a perfect reverse of the first set or not. The structure of this task engages AWM capabilities, requiring participants to retain the initial set of tones and inversely manipulate them in their mental workspace during the retention interval (<xref ref-type="bibr" rid="ref2">Albouy et al., 2017</xref>; <xref ref-type="bibr" rid="ref26">Foster et al., 2013</xref>; <xref ref-type="bibr" rid="ref81">Zatorre et al., 2010</xref>). Six practice trials with feedback were provided, followed by 100 experimental trials without feedback. Task trials are randomized with a maximum of 3 consecutive trials with the same condition. The average accuracy score was then computed based on the percentage of responses correct.</p>
</list-item>
<list-item>
<p>(2) The Music-in-Noise Task (MINT) assesses stream segregation, involving the detection of a target musical melody embedded in irrelevant musical background noise (<xref ref-type="bibr" rid="ref13">Coffey et al., 2019</xref>). Employing a match-mismatch discrimination design, each trial features one melodic line embedded in masking noise, and a melodic line presented in silence (<xref ref-type="fig" rid="fig2">Figure 2</xref>). Participants were asked to judge if the two presented melodies were the same or different. The MINT consists of five conditions which capture auditory sub-skills and the influence of perceptual cues: (1) Baseline (Pitch; <xref ref-type="fig" rid="fig2">Figure 2A</xref>), where the target-noise mixture is first presented, followed by the comparison melody in silence, without additional cues; (2) Rhythm (<xref ref-type="fig" rid="fig2">Figure 2B</xref>), the target is a rhythmic pattern with no pitch variation; (3) Spatial (<xref ref-type="fig" rid="fig2">Figure 2D</xref>), an additional spatial attentional cue is presented for the participant to attend to sounds coming from their left or right side (the perception of which is manipulated via interaural sound level difference); (4) Visual (<xref ref-type="fig" rid="fig2">Figure 2E</xref>), an additional visual cue outlining the melody&#x2019;s contour is presented to facilitate target detection within the mixture; and (5) Prediction (<xref ref-type="fig" rid="fig2">Figure 2C</xref>), subjects hear the target melody in silence first, followed by the comparison melody in noise. There is also a control condition with both melodies presented in silence to screen out participants incapable of discriminating the musical content of the MINT task, and who may therefore have amusia (<xref ref-type="bibr" rid="ref61">Peretz et al., 2002</xref>). All conditions were tested at three different signal-to-noise (SNR) levels (0, &#x2212;3, and&#x202F;&#x2212;&#x202F;6&#x202F;dB). 
Each condition involved 2 practice trials, followed by 20 experimental trials presented in a randomized block order across subjects. The accuracy score for each individual condition and overall performance is calculated by averaging the percentage of correct responses across all SNR levels within the respective condition(s); and the accuracy score for performance at each SNR level is computed by averaging the percentage of correct responses across all conditions at that specific SNR level (for further procedural details, see <xref ref-type="bibr" rid="ref13">Coffey et al., 2019</xref>).</p>
</list-item>
</list>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Illustration of AWM task (adapted from <xref ref-type="bibr" rid="ref2">Albouy et al., 2017</xref>). &#x201C;Match&#x201D; trials: the second sequence of melody was presented in a reversed temporal order of the first melody; &#x201C;mismatch&#x201D; trials: the second melody was presented in reversed temporal order, with one local pitch change. This required the retention and manipulation of auditory information.</p>
</caption>
<graphic xlink:href="fpsyg-16-1538511-g001.tif"/>
</fig>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Illustration for MINT (adapted from <xref ref-type="bibr" rid="ref13">Coffey et al., 2019</xref>). &#x201C;Match&#x201D; trials: the melody mixed in noise is identical to the melody presented in silence; &#x201C;mismatch&#x201D; trials: the melody mixed in noise is not identical to the melody presented in silence. MINT consists of five conditions: <bold>(A)</bold> Baseline (Pitch), <bold>(B)</bold> Rhythm, <bold>(C)</bold> Prediction, <bold>(D)</bold> Spatial, and <bold>(E)</bold> Visual. In the Spatial condition <bold>(D)</bold>, an icon on one side of the screen directed the listener to attend to the corresponding ear. In the Visual condition <bold>(E)</bold>, a scrolling graphic representation outlines the timing and melodic contour of the target melody.</p>
</caption>
<graphic xlink:href="fpsyg-16-1538511-g002.tif"/>
</fig>
</sec>
<sec id="sec10">
<title>Data analysis</title>
<p>Data analyses were conducted using IBM SPSS Statistics (version 29.0.2.0) to perform correlation and mediation analyses. To examine the correlational relationships between cumulative practice hours, AWM, and MINT scores, both parametric (Pearson&#x2019;s r) and non-parametric (Spearman&#x2019;s rho) tests were conducted. For consistency with the mediation analysis, which uses raw values rather than ranks, only Pearson&#x2019;s correlation coefficients are reported. Nonetheless, all tests produced comparable significant results (see <xref rid="SM1" ref-type="supplementary-material">Supplementary material</xref> for non-parametric correlations). Comparisons between Experiment 1 and Experiment 2 were performed using independent samples <italic>t</italic>-tests, while comparisons between Musicians and Non-Musicians in each experiment were conducted using the Mann&#x2013;Whitney U test, concerning the non-normal distribution and small sample size.</p>
<p>PROCESS macro (version 4.2 beta release) for SPSS by A. F. Hayes was used for mediation analysis. PROCESS is an observed variable ordinary least square and logistic regression path analysis tool that provides estimation of direct and indirect effects within both single and multiple mediator models (<xref ref-type="bibr" rid="ref62">Preacher and Hayes, 2004</xref>). It allows us to estimate the conditional indirect effects of AWM as a mediator between music training and MINT outcomes. All mediation models were tested for statistical significance through bootstrapping with 5,000 resamples, conducted with replacement, with significance determined by confidence intervals (<xref ref-type="bibr" rid="ref63">Preacher and Hayes, 2008</xref>). Bootstrapping is a common procedure in mediation analysis that enhances statistical power and robustness against non-normal distributions, small sample sizes, and outliers.</p>
</sec>
</sec>
<sec id="sec11">
<title>Results</title>
<sec id="sec12">
<title>Musical training and AWM</title>
<p>Descriptive statistics for AWM score (% correct) for all participants, as well as for the Musician and Non-Musician groups, are presented in <xref ref-type="table" rid="tab2">Table 2</xref>. Pearson correlation indicates a significant relationship between cumulative hours of practice and AWM task performance (<italic>r</italic>&#x202F;=&#x202F;0.399, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001; <xref ref-type="fig" rid="fig3">Figure 3A</xref>; see <xref rid="SM1" ref-type="supplementary-material">Supplementary Table S1</xref> for Spearman&#x2019;s results). Mann&#x2013;Whitney U test indicates a significant difference between Musicians and Non-Musicians groups on AWM score (<italic>U</italic>&#x202F;=&#x202F;792, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001) (<xref ref-type="fig" rid="fig3">Figure 3C</xref>).</p>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Experiment 1.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Variable</th>
<th align="center" valign="top">Total (<italic>N</italic>&#x202F;=&#x202F;78)</th>
<th align="center" valign="top">Musicians (<italic>N</italic>&#x202F;=&#x202F;42)</th>
<th align="center" valign="top">Non-musicians (<italic>N</italic>&#x202F;=&#x202F;20)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="bottom">AWM (mean&#x202F;&#x00B1;&#x202F;<italic>SD</italic>)</td>
<td align="center" valign="bottom">78.75&#x202F;&#x00B1;&#x202F;18.52</td>
<td align="center" valign="bottom">86.94&#x202F;&#x00B1;&#x202F;13.11</td>
<td align="center" valign="bottom">56.65&#x202F;&#x00B1;&#x202F;12.57</td>
</tr>
<tr>
<td align="left" valign="bottom">Range</td>
<td align="center" valign="bottom">45&#x2013;100</td>
<td align="center" valign="bottom">57&#x2013;100</td>
<td align="center" valign="bottom">45&#x2013;92</td>
</tr>
<tr>
<td align="left" valign="bottom">MINT (mean&#x202F;&#x00B1;&#x202F;<italic>SD</italic>)</td>
<td align="center" valign="bottom">82.05&#x202F;&#x00B1;&#x202F;7.60</td>
<td align="center" valign="bottom">84.64&#x202F;&#x00B1;&#x202F;5.86</td>
<td align="center" valign="bottom">75.13&#x202F;&#x00B1;&#x202F;8.36</td>
</tr>
<tr>
<td align="left" valign="bottom">Range</td>
<td align="center" valign="bottom">57.33&#x2013;92.00</td>
<td align="center" valign="bottom">70.67&#x2013;92.00</td>
<td align="center" valign="bottom">57.33&#x2013;89.33</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>Descriptive statistics for AWM and MINT scores.</p>
</table-wrap-foot>
</table-wrap>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Experiment 1 results. <bold>(A)</bold> Cumulative practice hours vs. AWM task performance. Pearson correlation is significant at the 0.1% level. <bold>(B)</bold> Cumulative practice hours vs. overall MINT performance. Pearson correlation is significant at the 0.1% level. <bold>(C)</bold> Violin plot showing AWM task performance for Musician (mean&#x202F;=&#x202F;86.94, <italic>SD</italic>&#x202F;=&#x202F;13.11, <italic>N</italic>&#x202F;=&#x202F;42) and Non-Musician groups (mean&#x202F;=&#x202F;56.65, <italic>SD</italic>&#x202F;=&#x202F;12.57, <italic>N</italic>&#x202F;=&#x202F;20). MINT performance for Musician (mean&#x202F;=&#x202F;84.64, <italic>SD</italic>&#x202F;=&#x202F;5.86) and Non-Musician groups (mean&#x202F;=&#x202F;75.13, <italic>SD</italic>&#x202F;=&#x202F;8.36). Significant group difference for both tasks, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001. <bold>(D)</bold> AWM ability vs. overall MINT performance. Pearson correlation is significant at the 0.1% level.</p>
</caption>
<graphic xlink:href="fpsyg-16-1538511-g003.tif"/>
</fig>
</sec>
<sec id="sec13">
<title>Musical training and MINT outcomes</title>
<p>Descriptive statistics for overall MINT performance are presented in <xref ref-type="table" rid="tab2">Table 2</xref>. The mean accuracy scores for each MINT sub-condition were: Baseline (Pitch)&#x202F;=&#x202F;80.94 (<italic>SD</italic>&#x202F;=&#x202F;11.95), Rhythm&#x202F;=&#x202F;63.85 (<italic>SD</italic>&#x202F;=&#x202F;14.56), Spatial&#x202F;=&#x202F;84.02 (<italic>SD</italic>&#x202F;=&#x202F;10.49), Visual&#x202F;=&#x202F;90.60 (<italic>SD</italic>&#x202F;=&#x202F;10.27), and Prediction&#x202F;=&#x202F;90.85 (<italic>SD</italic>&#x202F;=&#x202F;8.80). The mean accuracy scores for each SNR level were: SNR 0&#x202F;=&#x202F;84.77 (<italic>SD</italic>&#x202F;=&#x202F;10.74), SNR &#x2212;3&#x202F;=&#x202F;83.79 (<italic>SD</italic>&#x202F;=&#x202F;9.32), and SNR &#x2212;6&#x202F;=&#x202F;77.59 (<italic>SD</italic>&#x202F;=&#x202F;9.93). Pearson correlation analysis between cumulative practice hours and overall MINT task performance revealed a significant correlation, with an <italic>r</italic>-value of 0.363 (<italic>p</italic> &#x003C;&#x202F;0.001; <xref ref-type="fig" rid="fig3">Figure 3B</xref>; see <xref rid="SM1" ref-type="supplementary-material">Supplementary Table S1</xref> for Spearman&#x2019;s results). Cumulative hours of practice were also correlated with the Baseline (Pitch) (<italic>r</italic>&#x202F;=&#x202F;0.22, <italic>p</italic>&#x202F;=&#x202F;0.025), Prediction (<italic>r</italic>&#x202F;=&#x202F;0.26, <italic>p</italic>&#x202F;=&#x202F;0.010), Rhythm (<italic>r</italic>&#x202F;=&#x202F;0.28, <italic>p</italic>&#x202F;=&#x202F;0.007), and Visual (<italic>r</italic>&#x202F;=&#x202F;0.29, <italic>p</italic>&#x202F;=&#x202F;0.005) sub-conditions. 
In addition, cumulative hours of practice correlated with all SNR levels: SNR 0 (<italic>r</italic>&#x202F;=&#x202F;0.24, <italic>p</italic>&#x202F;=&#x202F;0.019), SNR &#x2212;3 (<italic>r</italic>&#x202F;=&#x202F;0.31, <italic>p</italic>&#x202F;=&#x202F;0.003), and SNR &#x2212;6 (<italic>r</italic>&#x202F;=&#x202F;0.29, <italic>p</italic>&#x202F;=&#x202F;0.006). Mann&#x2013;Whitney U test shows a significant difference in MINT performance between Musicians and Non-Musicians (U&#x202F;=&#x202F;696, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001) (<xref ref-type="fig" rid="fig3">Figure 3C</xref>).</p>
</sec>
<sec id="sec14">
<title>AWM and MINT performance</title>
<p>Pearson correlation analysis evaluated the relationship between performance on the AWM and MINT tasks. The AWM scores significantly correlated with the overall MINT scores (<italic>r</italic> =&#x202F;0.584, <italic>p</italic> &#x003C;&#x202F;0.001) (<xref ref-type="fig" rid="fig3">Figure 3D</xref>). The AWM correlated with all MINT sub-conditions, as listed in <xref ref-type="table" rid="tab3">Table 3A</xref> (see also <xref rid="SM1" ref-type="supplementary-material">Supplementary Table S2A</xref>). Moreover, AWM was correlated with all the SNR levels, as presented in <xref ref-type="table" rid="tab3">Table 3B</xref> (see also <xref rid="SM1" ref-type="supplementary-material">Supplementary Table S2B</xref>). Fisher&#x2019;s test performed to compare the differences between the z-transformations of each pair of correlations demonstrated that none of the correlations were significantly larger than the others.</p>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption>
<p>Experiment 1.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="center" valign="top" colspan="7">(A)</th>
</tr>
<tr>
<th/>
<th align="center" valign="top">Pitch total</th>
<th align="center" valign="top">Prediction total</th>
<th align="center" valign="top">Rhythm total</th>
<th align="center" valign="top">Spatial total</th>
<th align="center" valign="top">Visual total</th>
<th align="center" valign="top">MINT overall</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="bottom">AWM task performance</td>
<td align="center" valign="bottom">0.424&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="bottom">0.447&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="bottom">0.373&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="bottom">0.244&#x002A;</td>
<td align="center" valign="bottom">0.509&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="bottom">0.584&#x002A;&#x002A;&#x002A;</td>
</tr>
<tr>
<td align="left" valign="bottom">Sig. (2-tailed)</td>
<td align="center" valign="bottom">&#x003C;0.001</td>
<td align="center" valign="bottom">&#x003C;0.001</td>
<td align="center" valign="bottom">&#x003C;0.001</td>
<td align="center" valign="bottom">0.032</td>
<td align="center" valign="bottom">&#x003C;0.001</td>
<td align="center" valign="bottom">&#x003C;0.001</td>
</tr>
</tbody>
</table>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="center" valign="top" colspan="4">(B)</th>
</tr>
<tr>
<th/>
<th align="center" valign="top">SNR&#x2212;6</th>
<th align="center" valign="top">SNR&#x2212;3</th>
<th align="center" valign="top">SNR 0</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="bottom">AWM task performance</td>
<td align="center" valign="bottom">0.538&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="bottom">0.360&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="bottom">0.432&#x002A;&#x002A;&#x002A;</td>
</tr>
<tr>
<td align="left" valign="bottom">Sig. (2-tailed)</td>
<td align="center" valign="bottom">&#x003C;0.001</td>
<td align="center" valign="bottom">&#x003C;0.001</td>
<td align="center" valign="bottom">&#x003C;0.001</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>(A) AWM task performance vs. MINT conditions. Pearson correlations based on AWM task percent correct and average MINT scores for the corresponding condition. (B) AWM task performance vs. MINT SNR Levels. Pearson correlations based on AWM task percent correct and average MINT scores for each SNR level across conditions. &#x002A;&#x002A;&#x002A;denotes statistical significance at the 0.1% level. &#x002A;denotes statistical significance at the 5% level.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="sec15">
<title>Mediating role of AWM</title>
<p>Regression analyses with bootstrapping were performed to assess each component of the proposed mediation model. First, it was found that cumulative music training hours were positively associated with both MINT performance [<italic>R</italic> =&#x202F;0.36, <italic>F</italic>(1, 76)&#x202F;=&#x202F;11.56, <italic>p</italic> =&#x202F;0.001] and AWM performance [<italic>R</italic> =&#x202F;0.40, <italic>F</italic>(1, 76)&#x202F;=&#x202F;14.42, <italic>p</italic> &#x003C;&#x202F;0.001]. It was also found that the mediator, AWM ability, was positively related to the MINT test score [<italic>R</italic> =&#x202F;0.58, <italic>F</italic>(1, 76)&#x202F;=&#x202F;39.42, <italic>p</italic> &#x003C;&#x202F;0.001]. Lastly, multiple regression analysis was conducted to examine the effects of hours of musical training (<italic>X<sub>1</sub></italic>) and AWM (<italic>X<sub>2</sub></italic>) on MINT performance (<italic>Y</italic>). Results indicated that the overall regression model was significant [<italic>R</italic> =&#x202F;0.601, <italic>F</italic>(2, 75)&#x202F;=&#x202F;21.24, <italic>p</italic> &#x003C;&#x202F;0.001], with VIF&#x202F;=&#x202F;1.19, MSE&#x202F;=&#x202F;37.88, and &#x03B7;<sup>2</sup> =&#x202F;0.362 (<xref ref-type="fig" rid="fig4">Figure 4</xref>). In this model, AWM remained a significant predictor of MINT performance (&#x03B2;<sub>2</sub> =&#x202F;0.52, <italic>p</italic> &#x003C;&#x202F;0.001), whereas the direct contribution of musical training hours was not significant (&#x03B2;<sub>1</sub> =&#x202F;0.16, <italic>p</italic> =&#x202F;0.129).</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>Mediation analysis results. Enhanced AWM was the significant mediator of the correlation between music training (cumulative practice hours) and MINT performance. Pearson correlation is significant at the 0.1% level.</p>
</caption>
<graphic xlink:href="fpsyg-16-1538511-g004.tif"/>
</fig>
<p>Because the general model, the a-path (music training to AWM), and the b-path (AWM to MINT) were significant, mediation analysis was tested using the bootstrapping method with bias-corrected confidence estimates (refer to Methods and Materials section Data Analysis; <xref ref-type="bibr" rid="ref62">Preacher and Hayes, 2004</xref>). The 95% confidence interval of the indirect effect was obtained with 5,000 bootstrap samples and confirmed the significant mediating role of AWM in the relationship between music training and MINT task performance (<xref ref-type="fig" rid="fig4">Figure 4</xref>). Regression results also indicated that the direct effect of music training on MINT becomes non-significant (<italic>p</italic>&#x202F;=&#x202F;0.11) when controlling for AWM, thus suggesting full mediation. Moreover, confidence intervals derived from bootstrapping mediation analysis revealed mediation effects of AWM on the Baseline (Pitch), Prediction, Rhythm, and Visual sub-conditions. Our results also indicate significant mediating effects of AWM on MINT performance at the SNR 0 and SNR &#x2212;6 levels.</p>
</sec>
</sec>
<sec id="sec16">
<title>Interim discussion</title>
<p>The main findings from Experiment 1 aligned with our hypothesis, highlighting a clear advantage for musicians in both AWM abilities and music-in-noise perception. The results reveal a positive correlation between the number of practice hours and AWM task performance, and that the Musician group consistently outperformed Non-Musicians in AWM abilities. Additionally, both correlational and group comparison analyses illustrate a significant association between musical experience and enhanced music-in-noise performance. The bootstrapping analysis concerning practice hours, AWM and MINT further supports our mediation hypothesis, suggesting that AWM ability substantially mediates the relationship between musical experience and music-in-noise perception.</p>
<p>It is important to note that the majority of subjects from Experiment 1 were selected to fall into either the non-musician or the expert musician category. Consequently, the dataset includes fewer subjects with moderate exposure to music and thus may be less reflective of the general population&#x2019;s musical experience distribution. Although Pearson&#x2019;s correlations indicate a notable parametric association between music training and both AWM and music-in-noise abilities, replicating the main effects observed in Experiment 1 based on a more normative and representative dataset would strengthen the statistical robustness and generalizability of the results.</p>
<p>In addition, results from the MINT task in Experiment 1 showed that participants performed optimally around the 80% mark, suggesting that the SNR range tested (0, &#x2212;3, and &#x2212;6&#x202F;dB) may not fully challenge their music-in-noise capabilities. In light of these findings, we devised a second phase of the study to extend the difficulty of the MINT task with SNR levels of &#x2212;3, &#x2212;6, and &#x2212;9&#x202F;dB. By adjusting the noise ratio, we aim to better understand how musicianship affects MINT performance under more demanding conditions and to assess whether the effects observed in Experiment 1 persist with increased task demand. This modification should provide an assessment of the consistency of musical training effects across a wider range of noise interference challenges.</p>
<p>Based on the main correlational results from Experiment 1, we determined the minimum sample size required for Experiment 2 to achieve the desired statistical power. Using an expected correlation coefficient (<italic>&#x03C1;</italic>) of 0.40, a significance level (<italic>&#x03B1;</italic>) of 0.05, and a power (1 - <italic>&#x03B2;</italic>) of 0.90, and applying the Fisher Transformation of the correlation coefficient, the minimum sample size required for Experiment 2 is calculated to be 66.</p>
</sec>
</sec>
<sec id="sec17">
<title>Experiment (2)</title>
<sec id="sec18">
<title>Methods and materials</title>
<sec id="sec19">
<title>Participants</title>
<p>In the replication phase, we recruited 73 subjects with a distributed range of music experience and expertise (<xref ref-type="table" rid="tab4">Table 4</xref>). Recruitment methods and compensation were consistent with those used in Experiment 1. All participants had completed at least 1 year of university-level education. On average, subjects from Experiment 2 had fewer practice hours than those in Experiment 1; <italic>t</italic>(143)&#x202F;=&#x202F;&#x2212;1.84, <italic>p</italic>&#x202F;=&#x202F;0.034. Of the 73 subjects, 19 were categorized as Musicians according to the same criteria as above, and 18 were Non-Musicians (<xref ref-type="table" rid="tab4">Table 4</xref>).</p>
<table-wrap position="float" id="tab4">
<label>Table 4</label>
<caption>
<p>Experiment 2.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Variable</th>
<th align="center" valign="top">Total (<italic>N</italic>&#x202F;=&#x202F;73)</th>
<th align="center" valign="top">Musicians (<italic>N</italic>&#x202F;=&#x202F;19)</th>
<th align="center" valign="top">Non-Musicians (<italic>N</italic>&#x202F;=&#x202F;18)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Age (mean&#x202F;&#x00B1;&#x202F;<italic>SD</italic>)</td>
<td align="center" valign="middle">27.0&#x202F;&#x00B1;&#x202F;6.6</td>
<td align="center" valign="middle">28.3&#x202F;&#x00B1;&#x202F;6.5</td>
<td align="center" valign="middle">26.4&#x202F;&#x00B1;&#x202F;6.3</td>
</tr>
<tr>
<td align="left" valign="middle">Age range</td>
<td align="center" valign="middle">18&#x2013;49</td>
<td align="center" valign="middle">20&#x2013;41</td>
<td align="center" valign="middle">20&#x2013;45</td>
</tr>
<tr>
<td align="left" valign="middle">Sex</td>
<td align="center" valign="middle">37 males, 35 females, 1 non-binary</td>
<td align="center" valign="middle">10 males, 8 females, 1 non-binary</td>
<td align="center" valign="middle">10 males, 8 females</td>
</tr>
<tr>
<td align="left" valign="middle">Cumulative practice hours (mean&#x202F;&#x00B1;&#x202F;<italic>SD</italic>)</td>
<td align="center" valign="middle">4,600&#x202F;&#x00B1;&#x202F;6,600</td>
<td align="center" valign="middle">12,000 &#x00B1;&#x202F;8,600</td>
<td align="center" valign="middle">200&#x202F;&#x00B1;&#x202F;600</td>
</tr>
<tr>
<td align="left" valign="middle">Cumulative practice range</td>
<td align="center" valign="middle">0&#x2013;34,000</td>
<td align="center" valign="middle">4,700&#x2013;32,000</td>
<td align="center" valign="middle">0&#x2013;1900</td>
</tr>
<tr>
<td align="left" valign="middle">Age of onset (mean&#x202F;&#x00B1;&#x202F;<italic>SD</italic>)</td>
<td align="center" valign="middle">N/A</td>
<td align="center" valign="middle">6.0&#x202F;&#x00B1;&#x202F;2.5</td>
<td align="center" valign="middle">N/A</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>Descriptive statistics of demographic and musical training variables.</p>
</table-wrap-foot>
</table-wrap>
<p>All procedures and screening criteria remained consistent with those in Experiment 1 and were approved by either the McGill University Faculty of Medicine Research Ethics Board or Western University Non-Medical Research Ethics Board. Out of the 73 subjects who completed all components of Experiment 2, 3 who could not process basic musical content were excluded from the MINT analysis.</p>
</sec>
<sec id="sec20">
<title>Procedure</title>
<p>Refer to Experiment 1 Materials and methods section Procedure.</p>
</sec>
<sec id="sec21">
<title>Measures and Behavioral Tasks</title>
<p>Refer to Experiment 1 Materials and methods section Measures and Behavioral Tasks.</p>
</sec>
</sec>
<sec id="sec22">
<title>Results</title>
<sec id="sec23">
<title>Musical training and AWM</title>
<p>The mean accuracy score (% correct) for the AWM task in the second sample was 66.33 (<italic>SD</italic> =&#x202F;15.85, range: 41&#x2013;100, <italic>N</italic> =&#x202F;70). Results from a one-tailed Pearson correlation test indicated a trend toward significance in the association between musical training and AWM task performance (<italic>r</italic>&#x202F;=&#x202F;0.191, <italic>p</italic>&#x202F;=&#x202F;0.057). Potential outlier effects were suspected through examination of the data distribution, prompting the use of Spearman&#x2019;s rank-order correlation, which is more robust to extreme values. The Spearman&#x2019;s test revealed a significant monotonic relationship between AWM scores and cumulative hours of practice (&#x03C1;&#x202F;=&#x202F;0.324, <italic>p</italic>&#x202F;=&#x202F;0.003). The discrepancy between the rank-order and parametric test results suggests that the data may have been affected by extreme values. Upon comprehensive examination of the total 148 qualified subjects from Experiment 1 and 2 using linear regression (practice hours versus AWM performance), we identified two subjects from Experiment 2 with performance significantly deviating from the model&#x2019;s predictions. Specifically, one subject had a standardized residual of &#x2212;2.45 and the other &#x2212;2.40, while the standardized residuals for the remaining 146 subjects ranged between &#x2212;1.67 and 1.70. Consequently, these two subjects are considered outliers and were excluded from subsequent analysis.</p>
<p>By removing the two outliers, the adjusted mean AWM accuracy score in the second sample is presented in <xref ref-type="table" rid="tab5">Table 5</xref>. Independent samples <italic>t</italic>-test indicates a significantly lower AWM performance for the subjects in Experiment 2 compared to Experiment 1; <italic>t</italic>(144)&#x202F;=&#x202F;&#x2212;4.17, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001. A significant relationship between AWM score and cumulative hours of practice is demonstrated with Pearson&#x2019;s test (<italic>r</italic>&#x202F;=&#x202F;0.370, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001) (<xref ref-type="fig" rid="fig5">Figure 5A</xref>; see <xref rid="SM1" ref-type="supplementary-material">Supplementary Table S3</xref> for Spearman&#x2019;s results). In addition, the Mann&#x2013;Whitney U test also indicates a group difference in AWM between Musicians and Non-Musicians (<italic>U</italic>&#x202F;=&#x202F;302, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001; <xref ref-type="table" rid="tab5">Table 5</xref>; <xref ref-type="fig" rid="fig5">Figure 5C</xref>).</p>
<table-wrap position="float" id="tab5">
<label>Table 5</label>
<caption>
<p>Experiment 2.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Variable</th>
<th align="center" valign="top">Total (<italic>N</italic>&#x202F;=&#x202F;68)</th>
<th align="center" valign="top">Musicians (<italic>N</italic>&#x202F;=&#x202F;19)</th>
<th align="center" valign="top">Non-musicians (<italic>N</italic>&#x202F;=&#x202F;18)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="bottom">AWM (mean&#x202F;&#x00B1;&#x202F;<italic>SD</italic>)</td>
<td align="center" valign="bottom">66.78&#x202F;&#x00B1;&#x202F;15.83</td>
<td align="center" valign="bottom">76.68&#x202F;&#x00B1;&#x202F;16.60</td>
<td align="center" valign="bottom">55.44&#x202F;&#x00B1;&#x202F;6.82</td>
</tr>
<tr>
<td align="left" valign="bottom">Range</td>
<td align="center" valign="bottom">41&#x2013;100</td>
<td align="center" valign="bottom">55&#x2013;100</td>
<td align="center" valign="bottom">41&#x2013;69</td>
</tr>
<tr>
<td align="left" valign="bottom">MINT (mean&#x202F;&#x00B1;&#x202F;<italic>SD</italic>)</td>
<td align="center" valign="bottom">73.73&#x202F;&#x00B1;&#x202F;11.05</td>
<td align="center" valign="bottom">78.74&#x202F;&#x00B1;&#x202F;9.48</td>
<td align="center" valign="bottom">66.15&#x202F;&#x00B1;&#x202F;11.54</td>
</tr>
<tr>
<td align="left" valign="bottom">Range</td>
<td align="center" valign="bottom">46.67&#x2013;90.67</td>
<td align="center" valign="bottom">53.33&#x2013;89.33</td>
<td align="center" valign="bottom">46.67&#x2013;90.67</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>Descriptive statistics for AWM and MINT scores.</p>
</table-wrap-foot>
</table-wrap>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption>
<p>Experiment 2 results. <bold>(A)</bold> Cumulative practice hours vs. AWM task performance. Pearson correlation is significant at the 0.1% level. <bold>(B)</bold> Cumulative practice hours vs. overall MINT performance. Pearson correlation is significant at the 0.1% level. <bold>(C)</bold> Violin plot showing AWM task performance for Musician (mean&#x202F;=&#x202F;76.68, <italic>SD</italic>&#x202F;=&#x202F;16.60, <italic>N</italic>&#x202F;=&#x202F;19) and Non-Musician groups (mean&#x202F;=&#x202F;55.44, <italic>SD</italic>&#x202F;=&#x202F;6.82, <italic>N</italic>&#x202F;=&#x202F;18). MINT performance for Musician (mean&#x202F;=&#x202F;78.74, <italic>SD</italic>&#x202F;=&#x202F;9.48) and Non-Musician groups (mean&#x202F;=&#x202F;66.15, <italic>SD</italic>&#x202F;=&#x202F;11.54). Significant group difference for both tasks <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001. <bold>(D)</bold> AWM ability vs. overall MINT performance. Pearson correlation is significant at the 0.1% level.</p>
</caption>
<graphic xlink:href="fpsyg-16-1538511-g005.tif"/>
</fig>
</sec>
<sec id="sec24">
<title>Musical training and MINT outcomes</title>
<p>Descriptive statistics for overall MINT performance are presented in <xref ref-type="table" rid="tab5">Table 5</xref>. Independent samples <italic>t</italic>-test between Experiments 1 and 2 indicates a significantly lower MINT score for the subjects in Experiment 2; <italic>t</italic>(144)&#x202F;=&#x202F;&#x2212;5.36, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001. The mean accuracy scores for each MINT condition were: Baseline (Pitch)&#x202F;=&#x202F;72.45 (<italic>SD</italic>&#x202F;=&#x202F;16.25), Rhythm&#x202F;=&#x202F;59.51 (<italic>SD</italic>&#x202F;=&#x202F;12.95), Spatial&#x202F;=&#x202F;71.76 (<italic>SD</italic>&#x202F;=&#x202F;13.97), Visual&#x202F;=&#x202F;83.43 (<italic>SD</italic>&#x202F;=&#x202F;15.77), and Prediction&#x202F;=&#x202F;81.47 (<italic>SD</italic>&#x202F;=&#x202F;15.19). The mean accuracy scores for each SNR level were: SNR &#x2212;3&#x202F;=&#x202F;79.29 (<italic>SD</italic>&#x202F;=&#x202F;14.38), SNR &#x2212;6&#x202F;=&#x202F;74.00 (<italic>SD</italic>&#x202F;=&#x202F;12.51), and SNR &#x2212;9&#x202F;=&#x202F;67.88 (<italic>SD</italic>&#x202F;=&#x202F;13.04).</p>
<p>A significant correlation between cumulative practice hours and MINT task performance was observed, with <italic>r</italic> =&#x202F;0.293 (<italic>p</italic> =&#x202F;0.008; <xref ref-type="fig" rid="fig5">Figure 5B</xref>; see <xref rid="SM1" ref-type="supplementary-material">Supplementary Table S3</xref> for Spearman&#x2019;s results). Cumulative hours of practice were also correlated with the Baseline (Pitch) (<italic>r</italic>&#x202F;=&#x202F;0.21, <italic>p</italic>&#x202F;=&#x202F;0.040), Prediction (<italic>r</italic>&#x202F;=&#x202F;0.29, <italic>p</italic>&#x202F;=&#x202F;0.009), and Visual (<italic>r</italic>&#x202F;=&#x202F;0.29, <italic>p</italic>&#x202F;=&#x202F;0.009) sub-conditions. In addition, cumulative hours of practice correlated with SNR &#x2212;3 (<italic>r</italic>&#x202F;=&#x202F;0.34, <italic>p</italic>&#x202F;=&#x202F;0.003) and SNR &#x2212;9 (<italic>r</italic>&#x202F;=&#x202F;0.21, <italic>p</italic>&#x202F;=&#x202F;0.043). Mann&#x2013;Whitney U test shows a significant difference in total MINT performance between Musicians and Non-Musicians (<italic>U</italic>&#x202F;=&#x202F;281, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001; <xref ref-type="table" rid="tab5">Table 5</xref>; <xref ref-type="fig" rid="fig5">Figure 5C</xref>).</p>
</sec>
<sec id="sec25">
<title>AWM and MINT performance</title>
<p>Pearson correlation analysis evaluated the correlation between performance on the AWM and MINT tasks. AWM score significantly correlated with the overall MINT score (<italic>r</italic> =&#x202F;0.573, <italic>p</italic> &#x003C;&#x202F;0.001; <xref ref-type="fig" rid="fig5">Figure 5D</xref>). AWM also correlated significantly with all MINT sub-conditions and SNR levels, as presented in <xref ref-type="table" rid="tab6">Table 6</xref> (see also <xref rid="SM1" ref-type="supplementary-material">Supplementary Table S4</xref>). Fisher&#x2019;s test performed to compare the differences between the z-transformations of each pair of correlations demonstrated that none of the correlations were significantly larger than the others.</p>
<table-wrap position="float" id="tab6">
<label>Table 6</label>
<caption>
<p>Experiment 2.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="center" valign="top" colspan="7">(A)</th>
</tr>
<tr>
<th/>
<th align="center" valign="top">Pitch total</th>
<th align="center" valign="top">Prediction total</th>
<th align="center" valign="top">Rhythm total</th>
<th align="center" valign="top">Spatial total</th>
<th align="center" valign="top">Visual total</th>
<th align="center" valign="top">MINT overall</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="bottom">AWM task performance</td>
<td align="center" valign="bottom">0.451&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="bottom">0.424&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="bottom">0.292&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="bottom">0.438&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="bottom">0.507&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="bottom">0.573&#x002A;&#x002A;&#x002A;</td>
</tr>
<tr>
<td align="left" valign="bottom">Sig. (1-tailed)</td>
<td align="center" valign="bottom">&#x003C;0.001</td>
<td align="center" valign="bottom">&#x003C;0.001</td>
<td align="center" valign="bottom">0.008</td>
<td align="center" valign="bottom">&#x003C;0.001</td>
<td align="center" valign="bottom">&#x003C;0.001</td>
<td align="center" valign="bottom">&#x003C;0.001</td>
</tr>
</tbody>
</table>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="center" valign="top" colspan="4">(B)</th>
</tr>
<tr>
<th/>
<th align="center" valign="top">SNR&#x2212;9</th>
<th align="center" valign="top">SNR&#x2212;6</th>
<th align="center" valign="top">SNR&#x2212;3</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="bottom">AWM task performance</td>
<td align="center" valign="bottom">0.435&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="bottom">0.481&#x002A;&#x002A;&#x002A;</td>
<td align="center" valign="bottom">0.508&#x002A;&#x002A;&#x002A;</td>
</tr>
<tr>
<td align="left" valign="bottom">Sig. (1-tailed)</td>
<td align="center" valign="bottom">&#x003C;0.001</td>
<td align="center" valign="bottom">&#x003C;0.001</td>
<td align="center" valign="bottom">&#x003C;0.001</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>(A) AWM task performance vs. MINT conditions. Pearson correlations based on AWM task percent correct and average MINT scores for the corresponding condition. (B) AWM Task Performance vs. MINT SNR Levels. Pearson correlations based on AWM task percent correct and average MINT scores for each SNR level across conditions. &#x002A;&#x002A;&#x002A; denotes statistical significance at the 0.1% level.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="sec26">
<title>Mediating role of AWM</title>
<p>Regression analyses were conducted to assess each component of the mediation model proposed in Experiment 1. Linear regression with bootstrapping revealed a positive association between cumulative music training hours and both AWM performance [<italic>R</italic>&#x202F;=&#x202F;0.370, <italic>F</italic>(1, 66)&#x202F;=&#x202F;10.48, <italic>p</italic>&#x202F;=&#x202F;0.004] and MINT performance [<italic>R</italic>&#x202F;=&#x202F;0.293, <italic>F</italic>(1, 66)&#x202F;=&#x202F;6.18, <italic>p</italic>&#x202F;=&#x202F;0.015]. AWM ability as the proposed mediator was also positively related to MINT test scores [<italic>R</italic>&#x202F;=&#x202F;0.573, <italic>F</italic>(1, 66)&#x202F;=&#x202F;32.24, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001]. Subsequent multiple regression analysis was performed to assess the effects of musical training hours (<italic>X<sub>1</sub></italic>) and AWM (<italic>X<sub>2</sub></italic>) on MINT performance (<italic>Y</italic>). The overall regression model was significant [<italic>R</italic>&#x202F;=&#x202F;0.579, <italic>F</italic>(2, 65)&#x202F;=&#x202F;16.42, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001], with VIF&#x202F;=&#x202F;1.16, MSE&#x202F;=&#x202F;83.62, &#x03B7;<sup>2</sup>&#x202F;=&#x202F;0.336, and predictors contributing to improved MINT performance (&#x03B2;<sub>1</sub>&#x202F;=&#x202F;0.093, <italic>p</italic>&#x202F;=&#x202F;0.394; &#x03B2;<sub>2</sub> =&#x202F;0.538, <italic>p</italic>&#x202F;&#x003C;&#x202F;0.001).</p>
<p>Given that the multiple regression model and the paths were significant and consistent with Experiment 1, mediation analysis was conducted using the same bootstrapping method (refer to Methods and Materials section Data Analysis). A 95% confidence interval for the indirect effect was derived from bootstrap samples and demonstrated a significant mediating role of AWM in the relationship between music training and MINT task performance. Results also show that the direct effect of music training on MINT became non-significant (p&#x202F;=&#x202F;0.394) when controlling for AWM. Additional bootstrapping analysis also revealed a mediating effect of AWM on the Baseline (Pitch), Prediction, Spatial and Visual sub-conditions, and across all SNR levels.</p>
</sec>
</sec>
<sec id="sec27">
<title>Discussion</title>
<sec id="sec28">
<title>Effects of musical training on AWM and music-in-noise perception</title>
<p>The findings from both Experiments 1 and 2 provide compelling evidence that supports our hypothesis of a musician&#x2019;s advantage in both AWM abilities and music-in-noise perception. Importantly, the musician advantage was consistently observed across two distinct samples, which differed in overall musical experience, proportion of musicianship, and average performance on both tasks.</p>
<p>A meta-analysis by <xref ref-type="bibr" rid="ref73">Talamini et al. (2017)</xref> demonstrated that musicians outperform non-musicians across various memory domains, including long-term, short-term, and working memory, with a particularly pronounced advantage for tonal stimuli. To investigate this tonal aspect of AWM in which musicians excel, the task in this study required participants to detect a local pitch change between two tonal patterns that differed in temporal order. This AWM task not only captured auditory retention capabilities but also assessed the ability to mentally manipulate the stimuli (i.e., serial order processing; <xref ref-type="bibr" rid="ref2">Albouy et al., 2017</xref>; <xref ref-type="bibr" rid="ref26">Foster et al., 2013</xref>), along with related cognitive skills such as decision-making, attention, processing speed, etc. Correlational analyses between cumulative practice hours and AWM task performance from Experiment 1 indicated a positive association between music experience and AWM abilities (<xref ref-type="fig" rid="fig3">Figure 3A</xref>), a finding that was replicated in Experiment 2 (<xref ref-type="fig" rid="fig5">Figure 5A</xref>). Moreover, the group comparison underscores this advantage, as musicians from both studies consistently outperformed their non-musician counterparts on the standardized measures of AWM (<xref ref-type="fig" rid="fig3">Figures 3C</xref>, <xref ref-type="fig" rid="fig5">5C</xref>). These results are supported by existing literature, which consistently demonstrates behavioral, electrophysiological (event-related potential), and neuro-oscillatory evidence for the superiority of musicians in AWM abilities (<xref ref-type="bibr" rid="ref2">Albouy et al., 2017</xref>; <xref ref-type="bibr" rid="ref26">Foster et al., 2013</xref>; <xref ref-type="bibr" rid="ref28">George and Coch, 2011</xref>).</p>
<p>Analyses of the overall MINT task performance in Experiment 1 in relation to cumulative practice hours suggest a clear association between musical experience and improved music-in-noise perception (<xref ref-type="fig" rid="fig3">Figure 3B</xref>). Although subjects in Experiment 2 showed an overall poorer performance on the MINT&#x2014;potentially due to differences in musicianship and/or increased task SNR difficulty&#x2014;the correlation between musical experience and MINT performance remained consistent and significant (<xref ref-type="fig" rid="fig5">Figure 5B</xref>). In other words, the relationship between musical experience and music-in-noise perception is stable across different signal-to-noise levels tested. Furthermore, a significant musician advantage on music-in-noise perception was also observed in both studies when comparing the musician and non-musician group differences in the overall MINT performance (<xref ref-type="fig" rid="fig3">Figures 3C</xref>, <xref ref-type="fig" rid="fig5">5C</xref>). These results are in line with the findings from the original MINT study by <xref ref-type="bibr" rid="ref13">Coffey et al. (2019)</xref> and the subsequent MINT results by <xref ref-type="bibr" rid="ref37">Hsieh et al. (2022)</xref>, further validating the MINT&#x2019;s reliability and supporting the cognitive benefits of musical expertise amid varying levels of noise interference.</p>
</sec>
<sec id="sec29">
<title>AWM ability and music-in-noise perception</title>
<p>Musicians&#x2019; music-in-noise benefits may arise from improvements in both auditory perception and cognitive processing. On the perceptual side, musicians demonstrate an increased sensitivity to fundamental acoustic features critical for music perception, such as pitch discrimination and temporal fine structure (<xref ref-type="bibr" rid="ref52">Micheyl et al., 2006</xref>; <xref ref-type="bibr" rid="ref53">Mishra et al., 2015</xref>). Cognitively, studies have shown a connection between musicianship and enhancements in cognitive faculties including working memory and attention (<xref ref-type="bibr" rid="ref8">Bidelman and Yoo, 2020</xref>; <xref ref-type="bibr" rid="ref78">Yoo and Bidelman, 2019</xref>), which may be linked to stream segregation improvements.</p>
<p>Evidence suggesting that AWM plays a crucial role in music-in-noise perception stems from the strong correlation between performance on the AWM task and the overall MINT score, observed in both Experiment 1 (<xref ref-type="fig" rid="fig3">Figure 3D</xref>) and Experiment 2 (<xref ref-type="fig" rid="fig5">Figure 5D</xref>). This finding replicates the original MINT study (<xref ref-type="bibr" rid="ref13">Coffey et al., 2019</xref>), and is consistent with the majority of the SIN literature which suggests that working memory for phonological or tonal information is linked to improved speech segregation abilities (<xref ref-type="bibr" rid="ref8">Bidelman and Yoo, 2020</xref>; <xref ref-type="bibr" rid="ref25">Escobar et al., 2020</xref>; <xref ref-type="bibr" rid="ref44">Lad et al., 2020</xref>; <xref ref-type="bibr" rid="ref51">Mattys et al., 2012</xref>; <xref ref-type="bibr" rid="ref78">Yoo and Bidelman, 2019</xref>).</p>
<p>The mediation analysis conducted in Experiment 1 supports our hypothesis that AWM ability significantly mediates the relationship between musical experience and music-in-noise perception (<xref ref-type="fig" rid="fig4">Figure 4</xref>). This mediation model was successfully replicated in Experiment 2, which included a more heterogeneous musician population. The comparable results from Experiment 2 reinforce the reliability and generalizability of our initial findings. Overall, our results suggest that musicians&#x2019; enhanced AWM skills are a crucial driving force behind their enhanced MINT performance, and that musical training is associated with improvements in the performance of auditory stream segregation tasks through the enhancement of AWM capabilities. This mediating effect of AWM in music-in-noise performance parallels the mediation model proposed for AWM&#x2019;s role in SIN performance (<xref ref-type="bibr" rid="ref43">Kraus et al., 2012</xref>; <xref ref-type="bibr" rid="ref59">Parbery-Clark et al., 2009b</xref>). <xref ref-type="bibr" rid="ref59">Parbery-Clark et al. (2009b)</xref> demonstrated that musicians possess superior AWM skills, which those authors identify as a significant factor behind the group&#x2019;s improved SIN performance. In addition, <xref ref-type="bibr" rid="ref8">Bidelman and Yoo (2020)</xref> found that the relationship between musicianship and performance on a complex SIN task did not remain significant after controlling for working memory, which is associated with the listener&#x2019;s years of musical training. This finding supports the concept that auditory stream segregation superiority is driven heavily by the enhanced working memory capacity, likely developed through musical training, although aptitude may also play a role.</p>
<p>Although evidence supports the importance of AWM in overall stream segregation, the precise mechanisms underlying its contribution remain unclear. The predominant literature on SIN has focused on the role of AWM in facilitating the understanding of linguistic context (<xref ref-type="bibr" rid="ref43">Kraus et al., 2012</xref>). For example, the Ease of Language Understanding (ELU) model by <xref ref-type="bibr" rid="ref66">R&#x00F6;nnberg et al. (2013)</xref> posits that working memory enables the listener to hold a schematic representation of speech while processing contextual information, using linguistic knowledge to compensate for missing information in adverse listening environments. In addition, the ELU model states that individuals with enhanced working memory capacity can apply more mental resources to resolve the phonological and semantic aspects of a listening task (<xref ref-type="bibr" rid="ref66">R&#x00F6;nnberg et al., 2013</xref>). It follows that the advantage offered by AWM in aiding SIN processing may depend largely on the redundancy of linguistic contextual cues (e.g., phonological, lexical, syntactic, and semantic information) of the speech signal tested (<xref ref-type="bibr" rid="ref29">Gordon-Salant and Cole, 2016</xref>). However, given the consistent relationship between AWM and MINT performance&#x2014;which is not influenced by linguistic factors&#x2014;our study provides evidence that the benefits of AWM in stream segregation extend beyond the speech domain, pointing to more fundamental mechanisms that are more generally involved in stream segregation processing.</p>
</sec>
<sec id="sec30">
<title>AWM&#x2019;s association with perceptual and cognitive components in stream segregation</title>
<p>One advantage of the MINT over standard SIN tests is its ability to assess specific cues and auditory sub-skills related to stream segregation (<xref ref-type="bibr" rid="ref13">Coffey et al., 2019</xref>), offering insights into how AWM may interact with the perceptual and cognitive elements involved in this process. The original MINT study indicated that AWM has the most significant contribution to the Prediction condition, and the relationship between musical training and the Prediction task diminished in significance when AWM performance was factored in as a covariate in the analysis (<xref ref-type="bibr" rid="ref13">Coffey et al., 2019</xref>). Prior research also supports the role of AWM in musical predictive processing, highlighting its importance in top-down schematic expectations&#x2014;the concept that knowing the pattern to be segregated <italic>a priori</italic> facilitates subsequent detection (<xref ref-type="bibr" rid="ref7">Bey and McAdams, 2002</xref>).</p>
<p>However, contrary to earlier findings, here we did not observe a more important contribution of AWM to the MINT Prediction condition compared to other conditions. Instead, there were significant and consistent correlations between AWM and all MINT sub-tasks in both Experiments (<xref ref-type="table" rid="tab3">Table 3A</xref>; <xref ref-type="table" rid="tab6">Table 6A</xref>). This finding suggests that AWM&#x2019;s contribution is only one among many factors that modulate stream segregation situations.</p>
<p>One possible explanation for the contribution of AWM to general stream segregation is that enhanced AWM allows a more precise representation of acoustic signals in the mental workspace (<xref ref-type="bibr" rid="ref43">Kraus et al., 2012</xref>). Research suggests that working memory is linked to improved performance on a rhythm synchronization task, where participants are required to reproduce the temporal structure of the presented rhythms (<xref ref-type="bibr" rid="ref5">Bailey and Penhune, 2010</xref>). It is also indicated that individuals who can effectively retain auditory source properties, such as frequency and temporal fluctuations over time have a perceptual advantage in SIN tasks (<xref ref-type="bibr" rid="ref44">Lad et al., 2020</xref>; <xref ref-type="bibr" rid="ref45">Lad et al., 2024</xref>). It is therefore plausible that the ability to maintain acoustic information accurately aids the sequential segregation processes essential for stream intelligibility (<xref ref-type="bibr" rid="ref10">Bregman, 1990</xref>).</p>
<p>Another perspective involves attention. <xref ref-type="bibr" rid="ref17">Dalton et al. (2009)</xref> manipulated the working memory load during a distractor interference task, demonstrating a causal role for the availability of working memory in auditory selective attention. In addition, it is suggested that segregating auditory streams from background noise draws upon attentional resources (<xref ref-type="bibr" rid="ref33">Heinrich et al., 2008</xref>), and accomplishing such tasks necessitates the allocation of one&#x2019;s limited cognitive resources to balance the competing demands of attention, processing, and storage (<xref ref-type="bibr" rid="ref75">Wingfield and Tun, 2007</xref>). It is therefore plausible that enhanced AWM proficiency promotes the maintenance and encoding of auditory signals, which in turn allows for more efficient use of attention resources to extract and recall the target stream.</p>
<p>In addition, the advantages of AWM can be understood through the temporal aspects of information processing: temporal integration and serial order processing. On the one hand, it is proposed that working memory aids the linkage between recent past and imminent future events, thus serving both a retrospective role in information retention and a prospective role in anticipation (<xref ref-type="bibr" rid="ref27">Fuster and Bressler, 2012</xref>). Specifically, prior literature proposes that working memory is important for minimizing distractor interference through the active maintenance of current stimulus-processing priorities (<xref ref-type="bibr" rid="ref17">Dalton et al., 2009</xref>; <xref ref-type="bibr" rid="ref46">Lavie, 2005</xref>). In stream segregation, AWM may therefore enable individuals to hold fragments of auditory information while processing, integrating, and anticipating degraded target signals.</p>
<p>On the other hand, the AWM task used in this study, which requires temporal reversal, captures item-based retention and serial order processing, which have been shown to be distinct processes. Serial ordering, in particular, is thought to be a domain-general process based on positional codes, as observed in verbal and musical working memory studies (<xref ref-type="bibr" rid="ref39">Hurlstone et al., 2014</xref>; <xref ref-type="bibr" rid="ref50">Majerus, 2019</xref>; <xref ref-type="bibr" rid="ref30">Gorin, 2022</xref>). Since melodic retention and prediction did not appear to play a special role in stream segregation, serial ordering may serve as an alternative key factor, contributing to tracking the sequence of items over time and thereby enhancing the ability to organize auditory streams. It will be of interest in future research to study the contribution of AWM when measured with tasks that do not require serial order processing, such as for example musical transposition.</p>
</sec>
<sec id="sec31">
<title>The auditory dorsal stream and its implications for musician enhancement</title>
<p>The dorsal stream of auditory processing, which involves the parietal lobe, dorsal premotor cortex, and dorsolateral frontal regions, is central to higher-order cognitive auditory functions. It supports the manipulation of sound patterns in working memory, auditory-motor integration, abstract temporal representations, and predictive coding (<xref ref-type="bibr" rid="ref65">Rauschecker and Scott, 2009</xref>; <xref ref-type="bibr" rid="ref80">Zatorre, 2024</xref>). Neuroimaging studies highlight the dorsal stream&#x2019;s key role in AWM, with activations in parietal regions associated with various kinds of mental transformation and manipulation processes (<xref ref-type="bibr" rid="ref26">Foster et al., 2013</xref>; <xref ref-type="bibr" rid="ref81">Zatorre et al., 2010</xref>). Moreover, <xref ref-type="bibr" rid="ref2">Albouy et al. (2017)</xref> have observed that sustained evoked activity in the bilateral dorsal streams, particularly through long-range theta phase locking and increased local theta power in the IPS, is associated with successful AWM manipulation. Furthermore, when theta power is boosted in the dorsal stream via rhythmic brain stimulation (<xref ref-type="bibr" rid="ref2">Albouy et al., 2017</xref>) or via flickering visual rotating stimuli (<xref ref-type="bibr" rid="ref1">Albouy et al., 2022</xref>), AWM performance is also enhanced.</p>
<p>While perceiving auditory signals in background noise heavily engages primary and non-primary auditory regions (<xref ref-type="bibr" rid="ref36">Holmes et al., 2021</xref>; <xref ref-type="bibr" rid="ref41">Kell and McDermott, 2019</xref>; <xref ref-type="bibr" rid="ref64">Puschmann et al., 2019</xref>), research indicates that motor and somatosensory areas are also more actively recruited under challenging listening conditions (for review, see <xref ref-type="bibr" rid="ref68">Skipper et al., 2017</xref>). This suggests a compensatory mechanism of dorsal stream activity for reduced processing specificity in the auditory system (<xref ref-type="bibr" rid="ref21">Du et al., 2014</xref>). Importantly, a study comparing musicians and non-musicians found that the benefits of musical training on SIN perception in difficult listening contexts were related to activity in the motor cortices of the auditory dorsal streams (<xref ref-type="bibr" rid="ref22">Du and Zatorre, 2017</xref>).</p>
<p>Further research has shown that music training enhances functional connectivity within the dorsal auditory stream (<xref ref-type="bibr" rid="ref40">J&#x00FC;nemann et al., 2023</xref>). Musicians also exhibit greater structural connectivity in the white matter tracts of the dorsal stream (i.e., arcuate fasciculus and superior longitudinal fasciculus; <xref ref-type="bibr" rid="ref32">Halwani et al., 2011</xref>; <xref ref-type="bibr" rid="ref57">Oechslin et al., 2010</xref>). Differences in the microstructural plasticity of dorsal white matter are suggested to underlie musicians&#x2019; improved SIN perception (<xref ref-type="bibr" rid="ref48">Li et al., 2021</xref>). Considering the role of the auditory dorsal stream in AWM and SIN perception, we thus infer that the musician enhancements in these abilities may be rooted in this stream, although the exact mechanisms warrant further exploration.</p>
</sec>
<sec id="sec32">
<title>Implications for age-related hearing loss</title>
<p>Auditory functioning is one of the most prevalently affected sensory modalities in the elderly population (<xref ref-type="bibr" rid="ref77">Yamasoba et al., 2013</xref>; <xref ref-type="bibr" rid="ref19">Davis et al., 2016</xref>). In addition, older adults show deficits in speech recognition in noisy environments and AWM (<xref ref-type="bibr" rid="ref24">Dubno et al., 1984</xref>; <xref ref-type="bibr" rid="ref38">Humes and Floyd, 2005</xref>). Previous studies demonstrated that older musicians exhibit enhanced performance in AWM and SIN perception compared to their non-musician counterparts, suggesting that musical experience may mitigate age-related hearing challenges (<xref ref-type="bibr" rid="ref83">Zhang et al., 2021</xref>; <xref ref-type="bibr" rid="ref82">Zendel et al., 2019</xref>).</p>
<p>Recent longitudinal studies assigning older adults to musical activities (piano/choir) have also demonstrated behavioral, neurophysiological, and neuro-oscillatory evidence of improvements in SIN perception (<xref ref-type="bibr" rid="ref76">Worschech et al., 2021</xref>; <xref ref-type="bibr" rid="ref34">Hennessy et al., 2021</xref>; <xref ref-type="bibr" rid="ref23">Dubinsky et al., 2019</xref>; <xref ref-type="bibr" rid="ref31">Gray et al., 2022</xref>). Shedding light onto the mediating role of AWM in stream segregation, we propose that future music programs designed to address hearing challenges in older adults should focus on enhancing AWM to achieve optimal intervention outcomes.</p>
</sec>
<sec id="sec33">
<title>Limitations and future directions</title>
<p>Limitations of the current study include reliance on self-report music history questionnaire responses and the challenge of precisely controlling for the nuanced variations of individual musical experiences and expertise (e.g., learning styles, extent of practice). Moreover, the correlational design of the study does not address issues regarding self-selection and the direction of causality, particularly considering evidence suggesting that auditory and musical expertise arises from a combination of genetic predispositions and experience-driven plasticity (<xref ref-type="bibr" rid="ref9002">Schellenberg, 2015</xref>; <xref ref-type="bibr" rid="ref9003">Zatorre, 2013</xref>). The inherent predispositions for AWM or stream segregation abilities could potentially influence one&#x2019;s path toward musical engagement, an aspect that warrants further investigation.</p>
<p>Longitudinal studies with school-aged children (as well as the elderly, as described in the preceding section) provide evidence that music instruction is in fact causally associated with moderate benefits in SIN and AWM abilities (<xref ref-type="bibr" rid="ref70">Slater et al., 2015</xref>; <xref ref-type="bibr" rid="ref54">Nie et al., 2022</xref>), but of course, this does not mean that predisposing factors do not exist. Moreover, research has demonstrated a relationship between music and language performance in elementary school children (<xref ref-type="bibr" rid="ref84">Zuk et al., 2013</xref>), which is primarily driven by temporal processing (<xref ref-type="bibr" rid="ref4">Andrade et al., 2024</xref>). These findings suggest some shared contributions, yet the extent of transfer effects from music to speech and phonological processing remains unclear. Therefore, future research directions entail conducting longitudinal studies to examine the development of both speech-in-noise and music-in-noise perception, further unraveling the relationship between musical training, AWM, and overall auditory stream segregation. Such endeavors will also help elucidate experience-dependent plasticity in the auditory domain and contribute to a deeper understanding of the development of higher-level auditory cognitive mechanisms.</p>
</sec>
</sec>
</sec>
<sec sec-type="conclusions" id="sec34">
<title>Conclusion</title>
<p>This study explores the influence of musical training on two auditory cognitive processes: AWM and stream segregation. As hypothesized, our findings provide support for a musician advantage in AWM abilities and music-in-noise perception. We show using replication across two samples that musicians&#x2019; enhanced AWM skill is one of the driving forces behind their better music-in-noise performance, suggesting that musicianship fosters improvements in stream segregation through the enhancement of AWM capabilities. In addition, the study&#x2019;s two-phase design strengthens the generalizability of the results across various populations and conditions. Together, these findings shed light on the relationship between musical training, AWM, and stream segregation, underscoring the potential for music-based interventions to enhance auditory processing abilities.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec35">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec sec-type="ethics-statement" id="sec36">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the McGill University Faculty of Medicine Research Ethics Board and the Western University Non-Medical Research Ethics Board. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec sec-type="author-contributions" id="sec37">
<title>Author contributions</title>
<p>ML: Conceptualization, Formal analysis, Investigation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. IA-B: Investigation, Writing &#x2013; review &#x0026; editing. MF: Investigation, Writing &#x2013; review &#x0026; editing. M-EL: Investigation, Writing &#x2013; review &#x0026; editing. JS: Investigation, Writing &#x2013; review &#x0026; editing. EI: Investigation, Writing &#x2013; review &#x0026; editing. AP: Investigation, Writing &#x2013; review &#x0026; editing. NR: Investigation, Writing &#x2013; review &#x0026; editing. NL: Investigation, Writing &#x2013; review &#x0026; editing. TL: Investigation, Writing &#x2013; review &#x0026; editing. KN: Investigation, Writing &#x2013; review &#x0026; editing. KH: Investigation, Writing &#x2013; review &#x0026; editing. JH: Investigation, Writing &#x2013; review &#x0026; editing. EC: Writing &#x2013; review &#x0026; editing. JG: Supervision, Writing &#x2013; review &#x0026; editing. RZ: Supervision, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<sec sec-type="funding-information" id="sec38">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research and/or publication of this article. This research was supported in part by funding from the Canada First Research Excellence Fund, awarded to RZ and JG via the Healthy Brains, Healthy Lives initiative at McGill University and BrainsCAN at Western University. This work was also supported via an operating grant from the Canadian Institutes of Health Research (486895 to R.Z.), and by the Fonds de Recherche du Qu&#x00E9;bec via funding to the Center for Research in Brain, Language and Music (RSMA-340954 to R.Z.). R.Z. is funded via the Canada Research Chair program, and by the Scientific Grand Prize from the Fondation pour l&#x2019;Audition (Paris) (FPA RD-2021-6).</p>
</sec>
<ack>
<p>The authors thank Emmett Lewis-Hoeber, Sebastian Kolde, Ethan Yan, Amy Li, Lucy Core, and Emily Chen for their assistance in data collection, and Philippe Albouy for providing the task stimuli.</p>
</ack>
<sec sec-type="COI-statement" id="sec39">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
<p>The author(s) declared that they were an editorial board member of Frontiers, at the time of submission. This had no impact on the peer review process and the final decision.</p>
</sec>
<sec sec-type="ai-statement" id="sec40">
<title>Generative AI statement</title>
<p>The authors declare that no Gen AI was used in the creation of this manuscript.</p>
</sec>
<sec sec-type="disclaimer" id="sec41">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="sec42">
<title>Supplementary material</title>
<p>The Supplementary material for this article can be found online at: <ext-link xlink:href="https://www.frontiersin.org/articles/10.3389/fpsyg.2025.1538511/full#supplementary-material" ext-link-type="uri">https://www.frontiersin.org/articles/10.3389/fpsyg.2025.1538511/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Supplementary_file_1.docx" id="SM1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Albouy</surname> <given-names>P.</given-names></name> <name><surname>Martinez-Moreno</surname> <given-names>Z. E.</given-names></name> <name><surname>Hoyer</surname> <given-names>R. S.</given-names></name> <name><surname>Zatorre</surname> <given-names>R. J.</given-names></name> <name><surname>Baillet</surname> <given-names>S.</given-names></name></person-group> (<year>2022</year>). <article-title>Supramodality of neural entrainment: rhythmic visual stimulation causally enhances auditory working memory performance</article-title>. <source>Sci. Adv.</source> <volume>8</volume>:<fpage>eabj9782</fpage>. doi: <pub-id pub-id-type="doi">10.1126/sciadv.abj9782</pub-id>, PMID: <pub-id pub-id-type="pmid">35196074</pub-id></citation></ref>
<ref id="ref2"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Albouy</surname> <given-names>P.</given-names></name> <name><surname>Weiss</surname> <given-names>A.</given-names></name> <name><surname>Baillet</surname> <given-names>S.</given-names></name> <name><surname>Zatorre</surname> <given-names>R. J.</given-names></name></person-group> (<year>2017</year>). <article-title>Selective entrainment of theta oscillations in the dorsal stream causally enhances auditory working memory performance</article-title>. <source>Neuron</source> <volume>94</volume>, <fpage>193</fpage>&#x2013;<lpage>206.e5</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuron.2017.03.015</pub-id>, PMID: <pub-id pub-id-type="pmid">28343866</pub-id></citation></ref>
<ref id="ref3"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Anderson</surname> <given-names>S.</given-names></name> <name><surname>White-Schwoch</surname> <given-names>T.</given-names></name> <name><surname>Parbery-Clark</surname> <given-names>A.</given-names></name> <name><surname>Kraus</surname> <given-names>N.</given-names></name></person-group> (<year>2013</year>). <article-title>A dynamic auditory-cognitive system supports speech-in-noise perception in older adults</article-title>. <source>Hear. Res.</source> <volume>300</volume>, <fpage>18</fpage>&#x2013;<lpage>32</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.heares.2013.03.006</pub-id>, PMID: <pub-id pub-id-type="pmid">23541911</pub-id></citation></ref>
<ref id="ref4"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Andrade</surname> <given-names>P. E.</given-names></name> <name><surname>M&#x00FC;llensiefen</surname> <given-names>D.</given-names></name> <name><surname>Andrade</surname> <given-names>O. V. C. A.</given-names></name> <name><surname>Dunstan</surname> <given-names>J.</given-names></name> <name><surname>Zuk</surname> <given-names>J.</given-names></name> <name><surname>Gaab</surname> <given-names>N.</given-names></name></person-group> (<year>2024</year>). <article-title>Sequence processing in music predicts reading skills in young readers: a longitudinal study</article-title>. <source>J. Learn. Disabil.</source> <volume>57</volume>, <fpage>43</fpage>&#x2013;<lpage>60</lpage>. doi: <pub-id pub-id-type="doi">10.1177/00222194231157722</pub-id></citation></ref>
<ref id="ref5"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bailey</surname> <given-names>J. A.</given-names></name> <name><surname>Penhune</surname> <given-names>V. B.</given-names></name></person-group> (<year>2010</year>). <article-title>Rhythm synchronization performance and auditory working memory in early- and late-trained musicians</article-title>. <source>Exp. Brain Res.</source> <volume>204</volume>, <fpage>91</fpage>&#x2013;<lpage>101</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00221-010-2299-y</pub-id>, PMID: <pub-id pub-id-type="pmid">20508918</pub-id></citation></ref>
<ref id="ref9001"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Baddeley</surname> <given-names>A.</given-names></name></person-group> (<year>1992</year>). <article-title>Working memory</article-title>. <source>Science</source> <volume>255</volume>, <fpage>556</fpage>&#x2013;<lpage>559</lpage>. doi: <pub-id pub-id-type="doi">10.1126/science.1736359</pub-id>, PMID: <pub-id pub-id-type="pmid">1736359</pub-id></citation></ref>
<ref id="ref7"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bey</surname> <given-names>C.</given-names></name> <name><surname>McAdams</surname> <given-names>S.</given-names></name></person-group> (<year>2002</year>). <article-title>Schema-based processing in auditory scene analysis</article-title>. <source>Percept. Psychophys.</source> <volume>64</volume>, <fpage>844</fpage>&#x2013;<lpage>854</lpage>. doi: <pub-id pub-id-type="doi">10.3758/BF03194750</pub-id>, PMID: <pub-id pub-id-type="pmid">12201342</pub-id></citation></ref>
<ref id="ref8"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bidelman</surname> <given-names>G. M.</given-names></name> <name><surname>Yoo</surname> <given-names>J.</given-names></name></person-group> (<year>2020</year>). <article-title>Musicians show improved speech segregation in competitive, multi-talker cocktail party scenarios</article-title>. <source>Front. Psychol.</source> <volume>11</volume>:<fpage>1927</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2020.01927</pub-id>, PMID: <pub-id pub-id-type="pmid">32973610</pub-id></citation></ref>
<ref id="ref10"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Bregman</surname> <given-names>A. S.</given-names></name></person-group> (<year>1990</year>). <source>Auditory scene analysis: the perceptual organization of sound</source>. <publisher-loc>Cambridge, MA</publisher-loc>: <publisher-name>The MIT Press</publisher-name>.</citation></ref>
<ref id="ref11"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bregman</surname> <given-names>A. S.</given-names></name> <name><surname>Pinker</surname> <given-names>S.</given-names></name></person-group> (<year>1978</year>). <article-title>Auditory streaming and the building of timbre</article-title>. <source>Can. J. Psychol.</source> <volume>32</volume>, <fpage>19</fpage>&#x2013;<lpage>31</lpage>. doi: <pub-id pub-id-type="doi">10.1037/h0081664</pub-id>, PMID: <pub-id pub-id-type="pmid">728845</pub-id></citation></ref>
<ref id="ref12"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brown</surname> <given-names>R. M.</given-names></name> <name><surname>Zatorre</surname> <given-names>R. J.</given-names></name> <name><surname>Penhune</surname> <given-names>V. B.</given-names></name></person-group> (<year>2015</year>). <article-title>Expert music performance: cognitive, neural, and developmental bases</article-title>. <source>Prog. Brain Res.</source> <volume>217</volume>, <fpage>57</fpage>&#x2013;<lpage>86</lpage>. doi: <pub-id pub-id-type="doi">10.1016/bs.pbr.2014.11.021</pub-id>, PMID: <pub-id pub-id-type="pmid">25725910</pub-id></citation></ref>
<ref id="ref13"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Coffey</surname> <given-names>E. B. J.</given-names></name> <name><surname>Arseneau-Bruneau</surname> <given-names>I.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Zatorre</surname> <given-names>R. J.</given-names></name></person-group> (<year>2019</year>). <article-title>The music-in-noise task (MINT): a tool for dissecting complex auditory perception</article-title>. <source>Front. Neurosci.</source> <volume>13</volume>:<fpage>199</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2019.00199</pub-id>, PMID: <pub-id pub-id-type="pmid">30930734</pub-id></citation></ref>
<ref id="ref15"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Coffey</surname> <given-names>E. B. J.</given-names></name> <name><surname>Herholz</surname> <given-names>S. C.</given-names></name> <name><surname>Scala</surname> <given-names>S.</given-names></name> <name><surname>Zatorre</surname> <given-names>R. J.</given-names></name></person-group> (<year>2011</year>). &#x201C;<article-title>Montreal music history questionnaire: a tool for the assessment of music-related experience in music cognition research</article-title>,&#x201D; in <conf-name>Proceedings of the Neurosciences and Music IV: Learning and Memory, Conference</conf-name> <publisher-loc>Edinburgh</publisher-loc>.</citation></ref>
<ref id="ref16"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Coffey</surname> <given-names>E. B. J.</given-names></name> <name><surname>Mogilever</surname> <given-names>N. B.</given-names></name> <name><surname>Zatorre</surname> <given-names>R. J.</given-names></name></person-group> (<year>2017</year>). <article-title>Speech-in-noise perception in musicians: a review</article-title>. <source>Hear. Res.</source> <volume>352</volume>, <fpage>49</fpage>&#x2013;<lpage>69</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.heares.2017.02.006</pub-id>, PMID: <pub-id pub-id-type="pmid">28213134</pub-id></citation></ref>
<ref id="ref17"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dalton</surname> <given-names>P.</given-names></name> <name><surname>Santangelo</surname> <given-names>V.</given-names></name> <name><surname>Spence</surname> <given-names>C.</given-names></name></person-group> (<year>2009</year>). <article-title>The role of working memory in auditory selective attention</article-title>. <source>Q. J. Exp. Psychol.</source> <volume>62</volume>, <fpage>2126</fpage>&#x2013;<lpage>2132</lpage>. doi: <pub-id pub-id-type="doi">10.1080/17470210903023646</pub-id>, PMID: <pub-id pub-id-type="pmid">19557667</pub-id></citation></ref>
<ref id="ref18"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Davis</surname> <given-names>M. H.</given-names></name> <name><surname>Johnsrude</surname> <given-names>I. S.</given-names></name></person-group> (<year>2007</year>). <article-title>Hearing speech sounds: top-down influences on the interface between audition and speech perception</article-title>. <source>Hear. Res.</source> <volume>229</volume>, <fpage>132</fpage>&#x2013;<lpage>147</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.heares.2007.01.014</pub-id>, PMID: <pub-id pub-id-type="pmid">17317056</pub-id></citation></ref>
<ref id="ref19"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Davis</surname> <given-names>A.</given-names></name> <name><surname>McMahon</surname> <given-names>C. M.</given-names></name> <name><surname>Pichora-Fuller</surname> <given-names>K. M.</given-names></name> <name><surname>Russ</surname> <given-names>S.</given-names></name> <name><surname>Lin</surname> <given-names>F.</given-names></name> <name><surname>Olusanya</surname> <given-names>B. O.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Aging and hearing health: the life-course approach</article-title>. <source>The Gerontologist</source> <volume>56</volume>, <fpage>S256</fpage>&#x2013;<lpage>S267</lpage>. doi: <pub-id pub-id-type="doi">10.1093/geront/gnw033</pub-id>, PMID: <pub-id pub-id-type="pmid">26994265</pub-id></citation></ref>
<ref id="ref20"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Deroche</surname> <given-names>M. L. D.</given-names></name> <name><surname>Limb</surname> <given-names>C. J.</given-names></name> <name><surname>Chatterjee</surname> <given-names>M.</given-names></name> <name><surname>Gracco</surname> <given-names>V. L.</given-names></name></person-group> (<year>2017</year>). <article-title>Similar abilities of musicians and non-musicians to segregate voices by fundamental frequency</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>142</volume>, <fpage>1739</fpage>&#x2013;<lpage>1755</lpage>. doi: <pub-id pub-id-type="doi">10.1121/1.5005496</pub-id>, PMID: <pub-id pub-id-type="pmid">29092612</pub-id></citation></ref>
<ref id="ref21"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Du</surname> <given-names>Y.</given-names></name> <name><surname>Buchsbaum</surname> <given-names>B. R.</given-names></name> <name><surname>Grady</surname> <given-names>C. L.</given-names></name> <name><surname>Alain</surname> <given-names>C.</given-names></name></person-group> (<year>2014</year>). <article-title>Noise differentially impacts phoneme representations in the auditory and speech motor systems</article-title>. <source>Proc. Natl. Acad. Sci. USA</source> <volume>111</volume>, <fpage>7126</fpage>&#x2013;<lpage>7131</lpage>. doi: <pub-id pub-id-type="doi">10.1073/pnas.1318738111</pub-id>, PMID: <pub-id pub-id-type="pmid">24778251</pub-id></citation></ref>
<ref id="ref22"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Du</surname> <given-names>Y.</given-names></name> <name><surname>Zatorre</surname> <given-names>R. J.</given-names></name></person-group> (<year>2017</year>). <article-title>Musical training sharpens and bonds ears and tongue to hear speech better</article-title>. <source>Proc. Natl. Acad. Sci. USA</source> <volume>114</volume>, <fpage>13579</fpage>&#x2013;<lpage>13584</lpage>. doi: <pub-id pub-id-type="doi">10.1073/pnas.1712223114</pub-id>, PMID: <pub-id pub-id-type="pmid">29203648</pub-id></citation></ref>
<ref id="ref23"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dubinsky</surname> <given-names>E.</given-names></name> <name><surname>Wood</surname> <given-names>E. A.</given-names></name> <name><surname>Nespoli</surname> <given-names>G.</given-names></name> <name><surname>Russo</surname> <given-names>F. A.</given-names></name></person-group> (<year>2019</year>). <article-title>Short-term choir singing supports speech-in-noise perception and neural pitch strength in older adults with age-related hearing loss</article-title>. <source>Front. Neurosci.</source> <volume>13</volume>:<fpage>1153</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2019.01153</pub-id>, PMID: <pub-id pub-id-type="pmid">31849572</pub-id></citation></ref>
<ref id="ref24"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dubno</surname> <given-names>J. R.</given-names></name> <name><surname>Dirks</surname> <given-names>D. D.</given-names></name> <name><surname>Morgan</surname> <given-names>D. E.</given-names></name></person-group> (<year>1984</year>). <article-title>Effects of age and mild hearing loss on speech recognition in noise</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>76</volume>, <fpage>87</fpage>&#x2013;<lpage>96</lpage>. doi: <pub-id pub-id-type="doi">10.1121/1.391011</pub-id>, PMID: <pub-id pub-id-type="pmid">6747116</pub-id></citation></ref>
<ref id="ref25"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Escobar</surname> <given-names>J.</given-names></name> <name><surname>Mussoi</surname> <given-names>B. S.</given-names></name> <name><surname>Silberer</surname> <given-names>A. B.</given-names></name></person-group> (<year>2020</year>). <article-title>The effect of musical training and working memory in adverse listening situations</article-title>. <source>Ear Hear.</source> <volume>41</volume>, <fpage>278</fpage>&#x2013;<lpage>288</lpage>. doi: <pub-id pub-id-type="doi">10.1097/AUD.0000000000000754</pub-id>, PMID: <pub-id pub-id-type="pmid">32106117</pub-id></citation></ref>
<ref id="ref26"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Foster</surname> <given-names>N. E. V.</given-names></name> <name><surname>Halpern</surname> <given-names>A. R.</given-names></name> <name><surname>Zatorre</surname> <given-names>R. J.</given-names></name></person-group> (<year>2013</year>). <article-title>Common parietal activation in musical mental transformations across pitch and time</article-title>. <source>Neuroimage</source> <volume>75</volume>, <fpage>27</fpage>&#x2013;<lpage>35</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2013.02.044</pub-id>, PMID: <pub-id pub-id-type="pmid">23470983</pub-id></citation></ref>
<ref id="ref27"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fuster</surname> <given-names>J. M.</given-names></name> <name><surname>Bressler</surname> <given-names>S. L.</given-names></name></person-group> (<year>2012</year>). <article-title>Cognit activation: a mechanism enabling temporal integration in working memory</article-title>. <source>Trends Cogn. Sci.</source> <volume>16</volume>, <fpage>207</fpage>&#x2013;<lpage>218</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.tics.2012.03.005</pub-id>, PMID: <pub-id pub-id-type="pmid">22440831</pub-id></citation></ref>
<ref id="ref28"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>George</surname> <given-names>E. M.</given-names></name> <name><surname>Coch</surname> <given-names>D.</given-names></name></person-group> (<year>2011</year>). <article-title>Music training and working memory: an ERP study</article-title>. <source>Neuropsychologia</source> <volume>49</volume>, <fpage>1083</fpage>&#x2013;<lpage>1094</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuropsychologia.2011.02.001</pub-id>, PMID: <pub-id pub-id-type="pmid">21315092</pub-id></citation></ref>
<ref id="ref29"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gordon-Salant</surname> <given-names>S.</given-names></name> <name><surname>Cole</surname> <given-names>S. S.</given-names></name></person-group> (<year>2016</year>). <article-title>Effects of age and working memory capacity on speech recognition performance in noise among listeners with normal hearing</article-title>. <source>Ear Hear.</source> <volume>37</volume>, <fpage>593</fpage>&#x2013;<lpage>602</lpage>. doi: <pub-id pub-id-type="doi">10.1097/AUD.0000000000000316</pub-id>, PMID: <pub-id pub-id-type="pmid">27232071</pub-id></citation></ref>
<ref id="ref30"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gorin</surname> <given-names>S.</given-names></name></person-group> (<year>2022</year>). <article-title>Temporal grouping effects in verbal and musical short-term memory: is serial order representation domain-general?</article-title> <source>Q. J. Exp. Psychol.</source> <volume>75</volume>, <fpage>1603</fpage>&#x2013;<lpage>1627</lpage>. doi: <pub-id pub-id-type="doi">10.1177/17470218211057466</pub-id>, PMID: <pub-id pub-id-type="pmid">34698553</pub-id></citation></ref>
<ref id="ref31"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gray</surname> <given-names>R.</given-names></name> <name><surname>Sarampalis</surname> <given-names>A.</given-names></name> <name><surname>Ba&#x015F;kent</surname> <given-names>D.</given-names></name> <name><surname>Harding</surname> <given-names>E. E.</given-names></name></person-group> (<year>2022</year>). <article-title>Working-memory, alpha-theta oscillations and musical training in older age: research perspectives for speech-on-speech perception</article-title>. <source>Front. Aging Neurosci.</source> <volume>14</volume>:<fpage>806439</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnagi.2022.806439</pub-id>, PMID: <pub-id pub-id-type="pmid">35645774</pub-id></citation></ref>
<ref id="ref32"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Halwani</surname> <given-names>G. F.</given-names></name> <name><surname>Loui</surname> <given-names>P.</given-names></name> <name><surname>R&#x00FC;ber</surname> <given-names>T.</given-names></name> <name><surname>Schlaug</surname> <given-names>G.</given-names></name></person-group> (<year>2011</year>). <article-title>Effects of practice and experience on the arcuate fasciculus: comparing singers, instrumentalists, and non-musicians</article-title>. <source>Front. Psychol.</source> <volume>2</volume>:<fpage>156</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2011.00156</pub-id>, PMID: <pub-id pub-id-type="pmid">21779271</pub-id></citation></ref>
<ref id="ref33"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Heinrich</surname> <given-names>A.</given-names></name> <name><surname>Schneider</surname> <given-names>B. A.</given-names></name> <name><surname>Craik</surname> <given-names>F. I.</given-names></name></person-group> (<year>2008</year>). <article-title>Investigating the influence of continuous babble on auditory short-term memory performance</article-title>. <source>Q. J. Exp. Psychol.</source> <volume>61</volume>, <fpage>735</fpage>&#x2013;<lpage>751</lpage>. doi: <pub-id pub-id-type="doi">10.1080/17470210701402372</pub-id>, PMID: <pub-id pub-id-type="pmid">17853231</pub-id></citation></ref>
<ref id="ref34"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hennessy</surname> <given-names>S.</given-names></name> <name><surname>Wood</surname> <given-names>A.</given-names></name> <name><surname>Wilcox</surname> <given-names>R.</given-names></name> <name><surname>Habibi</surname> <given-names>A.</given-names></name></person-group> (<year>2021</year>). <article-title>Neurophysiological improvements in speech-in-noise task after short-term choir training in older adults</article-title>. <source>Aging</source> <volume>13</volume>, <fpage>9468</fpage>&#x2013;<lpage>9495</lpage>. doi: <pub-id pub-id-type="doi">10.18632/aging.202931</pub-id>, PMID: <pub-id pub-id-type="pmid">33824226</pub-id></citation></ref>
<ref id="ref35"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Herholz</surname> <given-names>S. C.</given-names></name> <name><surname>Zatorre</surname> <given-names>R. J.</given-names></name></person-group> (<year>2012</year>). <article-title>Musical training as a framework for brain plasticity: behavior, function, and structure</article-title>. <source>Neuron</source> <volume>76</volume>, <fpage>486</fpage>&#x2013;<lpage>502</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuron.2012.10.011</pub-id>, PMID: <pub-id pub-id-type="pmid">23141061</pub-id></citation></ref>
<ref id="ref36"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Holmes</surname> <given-names>E.</given-names></name> <name><surname>Zeidman</surname> <given-names>P.</given-names></name> <name><surname>Friston</surname> <given-names>K. J.</given-names></name> <name><surname>Griffiths</surname> <given-names>T. D.</given-names></name></person-group> (<year>2021</year>). <article-title>Difficulties with speech-in-noise perception related to fundamental grouping processes in auditory cortex</article-title>. <source>Cereb. Cortex</source> <volume>31</volume>, <fpage>1582</fpage>&#x2013;<lpage>1596</lpage>. doi: <pub-id pub-id-type="doi">10.1093/cercor/bhaa311</pub-id>, PMID: <pub-id pub-id-type="pmid">33136138</pub-id></citation></ref>
<ref id="ref37"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hsieh</surname> <given-names>I. H.</given-names></name> <name><surname>Tseng</surname> <given-names>H. C.</given-names></name> <name><surname>Liu</surname> <given-names>J. W.</given-names></name></person-group> (<year>2022</year>). <article-title>Domain-specific hearing-in-noise performance is associated with absolute pitch proficiency</article-title>. <source>Sci. Rep.</source> <volume>12</volume>:<fpage>16344</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-022-20869-2</pub-id>, PMID: <pub-id pub-id-type="pmid">36175508</pub-id></citation></ref>
<ref id="ref38"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Humes</surname> <given-names>L. E.</given-names></name> <name><surname>Floyd</surname> <given-names>S. S.</given-names></name></person-group> (<year>2005</year>). <article-title>Measures of working memory, sequence learning, and speech recognition in the elderly</article-title>. <source>J. Speech Lang. Hear. Res.</source> <volume>48</volume>, <fpage>224</fpage>&#x2013;<lpage>235</lpage>. doi: <pub-id pub-id-type="doi">10.1044/1092-4388(2005/016)</pub-id>, PMID: <pub-id pub-id-type="pmid">15938066</pub-id></citation></ref>
<ref id="ref39"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hurlstone</surname> <given-names>M. J.</given-names></name> <name><surname>Hitch</surname> <given-names>G. J.</given-names></name> <name><surname>Baddeley</surname> <given-names>A. D.</given-names></name></person-group> (<year>2014</year>). <article-title>Memory for serial order across domains: an overview of the literature and directions for future research</article-title>. <source>Psychol. Bull.</source> <volume>140</volume>, <fpage>339</fpage>&#x2013;<lpage>373</lpage>. doi: <pub-id pub-id-type="doi">10.1037/a0034221</pub-id>, PMID: <pub-id pub-id-type="pmid">24079725</pub-id></citation></ref>
<ref id="ref40"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>J&#x00FC;nemann</surname> <given-names>K.</given-names></name> <name><surname>Engels</surname> <given-names>A.</given-names></name> <name><surname>Marie</surname> <given-names>D.</given-names></name> <name><surname>Worschech</surname> <given-names>F.</given-names></name> <name><surname>Scholz</surname> <given-names>D. S.</given-names></name> <name><surname>Grouiller</surname> <given-names>F.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Increased functional connectivity in the right dorsal auditory stream after a full year of piano training in healthy older adults</article-title>. <source>Sci. Rep.</source> <volume>13</volume>:<fpage>19993</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-023-46513-1</pub-id>, PMID: <pub-id pub-id-type="pmid">37968500</pub-id></citation></ref>
<ref id="ref41"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kell</surname> <given-names>A. J. E.</given-names></name> <name><surname>McDermott</surname> <given-names>J. H.</given-names></name></person-group> (<year>2019</year>). <article-title>Invariance to background noise as a signature of non-primary auditory cortex</article-title>. <source>Nat. Commun.</source> <volume>10</volume>:<fpage>3958</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41467-019-11710-y</pub-id>, PMID: <pub-id pub-id-type="pmid">31477711</pub-id></citation></ref>
<ref id="ref42"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Killion</surname> <given-names>M. C.</given-names></name> <name><surname>Niquette</surname> <given-names>P. A.</given-names></name> <name><surname>Gudmundsen</surname> <given-names>G. I.</given-names></name> <name><surname>Revit</surname> <given-names>L. J.</given-names></name> <name><surname>Banerjee</surname> <given-names>S.</given-names></name></person-group> (<year>2004</year>). <article-title>Development of a quick speech-in-noise test for measuring signal-to-noise ratio loss in normal-hearing and hearing-impaired listeners</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>116</volume>, <fpage>2395</fpage>&#x2013;<lpage>2405</lpage>. doi: <pub-id pub-id-type="doi">10.1121/1.1784440</pub-id>, PMID: <pub-id pub-id-type="pmid">15532670</pub-id></citation></ref>
<ref id="ref43"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kraus</surname> <given-names>N.</given-names></name> <name><surname>Strait</surname> <given-names>D. L.</given-names></name> <name><surname>Parbery-Clark</surname> <given-names>A.</given-names></name></person-group> (<year>2012</year>). <article-title>Cognitive factors shape brain networks for auditory skills: spotlight on auditory working memory</article-title>. <source>Ann. N. Y. Acad. Sci.</source> <volume>1252</volume>, <fpage>100</fpage>&#x2013;<lpage>107</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1749-6632.2012.06463.x</pub-id>, PMID: <pub-id pub-id-type="pmid">22524346</pub-id></citation></ref>
<ref id="ref44"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lad</surname> <given-names>M.</given-names></name> <name><surname>Holmes</surname> <given-names>E.</given-names></name> <name><surname>Chu</surname> <given-names>A.</given-names></name> <name><surname>Griffiths</surname> <given-names>T. D.</given-names></name></person-group> (<year>2020</year>). <article-title>Speech-in-noise detection is related to auditory working memory precision for frequency</article-title>. <source>Sci. Rep.</source> <volume>10</volume>:<fpage>13997</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-020-70952-9</pub-id>, PMID: <pub-id pub-id-type="pmid">32814792</pub-id></citation></ref>
<ref id="ref45"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lad</surname> <given-names>M.</given-names></name> <name><surname>Taylor</surname> <given-names>J. P.</given-names></name> <name><surname>Griffiths</surname> <given-names>T. D.</given-names></name></person-group> (<year>2024</year>). <article-title>The contribution of short-term memory for sound features to speech-in-noise perception and cognition</article-title>. <source>Hear. Res.</source> <volume>451</volume>:<fpage>109081</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.heares.2024.109081</pub-id>, PMID: <pub-id pub-id-type="pmid">39004015</pub-id></citation></ref>
<ref id="ref46"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lavie</surname> <given-names>N.</given-names></name></person-group> (<year>2005</year>). <article-title>Distracted and confused?: selective attention under load</article-title>. <source>Trends Cogn. Sci.</source> <volume>9</volume>, <fpage>75</fpage>&#x2013;<lpage>82</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.tics.2004.12.004</pub-id>, PMID: <pub-id pub-id-type="pmid">15668100</pub-id></citation></ref>
<ref id="ref47"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lewis</surname> <given-names>J. H.</given-names></name> <name><surname>Castellanos</surname> <given-names>I.</given-names></name> <name><surname>Moberly</surname> <given-names>A. C.</given-names></name></person-group> (<year>2021</year>). <article-title>The impact of neurocognitive skills on recognition of spectrally degraded sentences</article-title>. <source>J. Am. Acad. Audiol.</source> <volume>32</volume>, <fpage>528</fpage>&#x2013;<lpage>536</lpage>. doi: <pub-id pub-id-type="doi">10.1055/s-0041-1732438</pub-id>, PMID: <pub-id pub-id-type="pmid">34965599</pub-id></citation></ref>
<ref id="ref48"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Zatorre</surname> <given-names>R. J.</given-names></name> <name><surname>Du</surname> <given-names>Y.</given-names></name></person-group> (<year>2021</year>). <article-title>The microstructural plasticity of the arcuate fasciculus undergirds improved speech in noise perception in musicians</article-title>. <source>Cereb. Cortex</source> <volume>31</volume>, <fpage>3975</fpage>&#x2013;<lpage>3985</lpage>. doi: <pub-id pub-id-type="doi">10.1093/cercor/bhab063</pub-id>, PMID: <pub-id pub-id-type="pmid">34037726</pub-id></citation></ref>
<ref id="ref50"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Majerus</surname> <given-names>S.</given-names></name></person-group> (<year>2019</year>). <article-title>Verbal working memory and the phonological buffer: the question of serial order</article-title>. <source>Cortex</source> <volume>112</volume>, <fpage>122</fpage>&#x2013;<lpage>133</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cortex.2018.04.016</pub-id>, PMID: <pub-id pub-id-type="pmid">29887208</pub-id></citation></ref>
<ref id="ref51"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mattys</surname> <given-names>S. L.</given-names></name> <name><surname>Davis</surname> <given-names>M. H.</given-names></name> <name><surname>Bradlow</surname> <given-names>A. R.</given-names></name> <name><surname>Scott</surname> <given-names>S. K.</given-names></name></person-group> (<year>2012</year>). <article-title>Speech recognition in adverse conditions: a review</article-title>. <source>Lang. Cogn. Process.</source> <volume>27</volume>, <fpage>953</fpage>&#x2013;<lpage>978</lpage>. doi: <pub-id pub-id-type="doi">10.1080/01690965.2012.705006</pub-id>, PMID: <pub-id pub-id-type="pmid">39989647</pub-id></citation></ref>
<ref id="ref52"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Micheyl</surname> <given-names>C.</given-names></name> <name><surname>Delhommeau</surname> <given-names>K.</given-names></name> <name><surname>Perrot</surname> <given-names>X.</given-names></name> <name><surname>Oxenham</surname> <given-names>A. J.</given-names></name></person-group> (<year>2006</year>). <article-title>Influence of musical and psychoacoustical training on pitch discrimination</article-title>. <source>Hear. Res.</source> <volume>219</volume>, <fpage>36</fpage>&#x2013;<lpage>47</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.heares.2006.05.004</pub-id>, PMID: <pub-id pub-id-type="pmid">16839723</pub-id></citation></ref>
<ref id="ref53"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mishra</surname> <given-names>S. K.</given-names></name> <name><surname>Panda</surname> <given-names>M. R.</given-names></name> <name><surname>Raj</surname> <given-names>S.</given-names></name></person-group> (<year>2015</year>). <article-title>Influence of musical training on sensitivity to temporal fine structure</article-title>. <source>Int. J. Audiol.</source> <volume>54</volume>, <fpage>220</fpage>&#x2013;<lpage>226</lpage>. doi: <pub-id pub-id-type="doi">10.3109/14992027.2014.969411</pub-id>, PMID: <pub-id pub-id-type="pmid">25395259</pub-id></citation></ref>
<ref id="ref54"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nie</surname> <given-names>P.</given-names></name> <name><surname>Wang</surname> <given-names>C.</given-names></name> <name><surname>Rong</surname> <given-names>G.</given-names></name> <name><surname>Du</surname> <given-names>B.</given-names></name> <name><surname>Lu</surname> <given-names>J.</given-names></name> <name><surname>Li</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Effects of music training on the auditory working memory of Chinese-speaking school-aged children: a longitudinal intervention study</article-title>. <source>Front. Psychol.</source> <volume>12</volume>:<fpage>770425</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2021.770425</pub-id>, PMID: <pub-id pub-id-type="pmid">35153898</pub-id></citation></ref>
<ref id="ref55"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nilsson</surname> <given-names>M.</given-names></name> <name><surname>Soli</surname> <given-names>S. D.</given-names></name> <name><surname>Sullivan</surname> <given-names>J. A.</given-names></name></person-group> (<year>1994</year>). <article-title>Development of the hearing in noise test for the measurement of speech reception thresholds in quiet and in noise</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>95</volume>, <fpage>1085</fpage>&#x2013;<lpage>1099</lpage>. doi: <pub-id pub-id-type="doi">10.1121/1.408469</pub-id>, PMID: <pub-id pub-id-type="pmid">8132902</pub-id></citation></ref>
<ref id="ref56"><citation citation-type="other"><person-group person-group-type="author"><name><surname>van Noorden</surname> <given-names>L. P. A. S.</given-names></name></person-group> (<year>1975</year>). <source>Temporal coherence in the perception of tone sequences</source>. [PhD Thesis, Institute for Perception Research, Eindhoven]. Technische Hogeschool Eindhoven.</citation></ref>
<ref id="ref57"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Oechslin</surname> <given-names>M. S.</given-names></name> <name><surname>Imfeld</surname> <given-names>A.</given-names></name> <name><surname>Loenneker</surname> <given-names>T.</given-names></name> <name><surname>Meyer</surname> <given-names>M.</given-names></name> <name><surname>J&#x00E4;ncke</surname> <given-names>L.</given-names></name></person-group> (<year>2010</year>). <article-title>The plasticity of the superior longitudinal fasciculus as a function of musical expertise: a diffusion tensor imaging study</article-title>. <source>Front. Hum. Neurosci.</source> <volume>3</volume>:<fpage>76</fpage>. doi: <pub-id pub-id-type="doi">10.3389/neuro.09.076.2009</pub-id>, PMID: <pub-id pub-id-type="pmid">20161812</pub-id></citation></ref>
<ref id="ref58"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Parbery-Clark</surname> <given-names>A.</given-names></name> <name><surname>Skoe</surname> <given-names>E.</given-names></name> <name><surname>Kraus</surname> <given-names>N.</given-names></name></person-group> (<year>2009a</year>). <article-title>Musical experience limits the degradative effects of background noise on the neural processing of sound</article-title>. <source>J. Neurosci.</source> <volume>29</volume>, <fpage>14100</fpage>&#x2013;<lpage>14107</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.3256-09.2009</pub-id>, PMID: <pub-id pub-id-type="pmid">19906958</pub-id></citation></ref>
<ref id="ref59"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Parbery-Clark</surname> <given-names>A.</given-names></name> <name><surname>Skoe</surname> <given-names>E.</given-names></name> <name><surname>Lam</surname> <given-names>C.</given-names></name> <name><surname>Kraus</surname> <given-names>N.</given-names></name></person-group> (<year>2009b</year>). <article-title>Musician enhancement for speech-in-noise</article-title>. <source>Ear Hear.</source> <volume>30</volume>, <fpage>653</fpage>&#x2013;<lpage>661</lpage>. doi: <pub-id pub-id-type="doi">10.1097/AUD.0b013e3181b412e9</pub-id>, PMID: <pub-id pub-id-type="pmid">19734788</pub-id></citation></ref>
<ref id="ref61"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Peretz</surname> <given-names>I.</given-names></name> <name><surname>Ayotte</surname> <given-names>J.</given-names></name> <name><surname>Zatorre</surname> <given-names>R. J.</given-names></name> <name><surname>Mehler</surname> <given-names>J.</given-names></name> <name><surname>Ahad</surname> <given-names>P.</given-names></name> <name><surname>Penhune</surname> <given-names>V. B.</given-names></name> <etal/></person-group>. (<year>2002</year>). <article-title>Congenital amusia: a disorder of fine-grained pitch discrimination</article-title>. <source>Neuron</source> <volume>33</volume>, <fpage>185</fpage>&#x2013;<lpage>191</lpage>. doi: <pub-id pub-id-type="doi">10.1016/s0896-6273(01)00580-3</pub-id>, PMID: <pub-id pub-id-type="pmid">11804567</pub-id></citation></ref>
<ref id="ref62"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Preacher</surname> <given-names>K. J.</given-names></name> <name><surname>Hayes</surname> <given-names>A. F.</given-names></name></person-group> (<year>2004</year>). <article-title>SPSS and SAS procedures for estimating indirect effects in simple mediation models</article-title>. <source>Behav. Res. Methods Instrum. Comput.</source> <volume>36</volume>, <fpage>717</fpage>&#x2013;<lpage>731</lpage>. doi: <pub-id pub-id-type="doi">10.3758/BF03206553</pub-id>, PMID: <pub-id pub-id-type="pmid">15641418</pub-id></citation></ref>
<ref id="ref63"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Preacher</surname> <given-names>K. J.</given-names></name> <name><surname>Hayes</surname> <given-names>A. F.</given-names></name></person-group> (<year>2008</year>). <article-title>Asymptotic and resampling strategies for assessing and comparing indirect effects in multiple mediator models</article-title>. <source>Behav. Res. Methods</source> <volume>40</volume>, <fpage>879</fpage>&#x2013;<lpage>891</lpage>. doi: <pub-id pub-id-type="doi">10.3758/BRM.40.3.879</pub-id>, PMID: <pub-id pub-id-type="pmid">18697684</pub-id></citation></ref>
<ref id="ref64"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Puschmann</surname> <given-names>S.</given-names></name> <name><surname>Baillet</surname> <given-names>S.</given-names></name> <name><surname>Zatorre</surname> <given-names>R. J.</given-names></name></person-group> (<year>2019</year>). <article-title>Musicians at the cocktail party: neural substrates of musical training during selective listening in multispeaker situations</article-title>. <source>Cereb. Cortex</source> <volume>29</volume>, <fpage>3253</fpage>&#x2013;<lpage>3265</lpage>. doi: <pub-id pub-id-type="doi">10.1093/cercor/bhy193</pub-id>, PMID: <pub-id pub-id-type="pmid">30137239</pub-id></citation></ref>
<ref id="ref65"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rauschecker</surname> <given-names>J. P.</given-names></name> <name><surname>Scott</surname> <given-names>S. K.</given-names></name></person-group> (<year>2009</year>). <article-title>Maps and streams in the auditory cortex: nonhuman primates illuminate human speech processing</article-title>. <source>Nat. Neurosci.</source> <volume>12</volume>, <fpage>718</fpage>&#x2013;<lpage>724</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nn.2331</pub-id>, PMID: <pub-id pub-id-type="pmid">19471271</pub-id></citation></ref>
<ref id="ref66"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>R&#x00F6;nnberg</surname> <given-names>J.</given-names></name> <name><surname>Lunner</surname> <given-names>T.</given-names></name> <name><surname>Zekveld</surname> <given-names>A.</given-names></name> <name><surname>S&#x00F6;rqvist</surname> <given-names>P.</given-names></name> <name><surname>Danielsson</surname> <given-names>H.</given-names></name> <name><surname>Lyxell</surname> <given-names>B.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>The ease of language understanding (ELU) model: theoretical, empirical, and clinical advances</article-title>. <source>Front. Syst. Neurosci.</source> <volume>7</volume>:<fpage>31</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnsys.2013.00031</pub-id>, PMID: <pub-id pub-id-type="pmid">23874273</pub-id></citation></ref>
<ref id="ref9002"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schellenberg</surname> <given-names>E. G.</given-names></name></person-group> (<year>2015</year>). <article-title>Music training and speech perception: a gene-environment interaction</article-title>. <source>Ann. N. Y. Acad. Sci.</source> <volume>1337</volume>, <fpage>170</fpage>&#x2013;<lpage>177</lpage>. doi: <pub-id pub-id-type="doi">10.1111/nyas.12627</pub-id>, PMID: <pub-id pub-id-type="pmid">25618067</pub-id></citation></ref>
<ref id="ref67"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shamma</surname> <given-names>S. A.</given-names></name> <name><surname>Micheyl</surname> <given-names>C.</given-names></name></person-group> (<year>2010</year>). <article-title>Behind the scenes of auditory perception</article-title>. <source>Curr. Opin. Neurobiol.</source> <volume>20</volume>, <fpage>361</fpage>&#x2013;<lpage>366</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.conb.2010.03.009</pub-id>, PMID: <pub-id pub-id-type="pmid">20456940</pub-id></citation></ref>
<ref id="ref68"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Skipper</surname> <given-names>J. I.</given-names></name> <name><surname>Devlin</surname> <given-names>J. T.</given-names></name> <name><surname>Lametti</surname> <given-names>D. R.</given-names></name></person-group> (<year>2017</year>). <article-title>The hearing ear is always found close to the speaking tongue: review of the role of the motor system in speech perception</article-title>. <source>Brain Lang.</source> <volume>164</volume>, <fpage>77</fpage>&#x2013;<lpage>105</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bandl.2016.10.004</pub-id>, PMID: <pub-id pub-id-type="pmid">27821280</pub-id></citation></ref>
<ref id="ref69"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Slater</surname> <given-names>J.</given-names></name> <name><surname>Kraus</surname> <given-names>N.</given-names></name></person-group> (<year>2016</year>). <article-title>The role of rhythm in perceiving speech in noise: a comparison of percussionists, vocalists and non-musicians</article-title>. <source>Cogn. Process.</source> <volume>17</volume>, <fpage>79</fpage>&#x2013;<lpage>87</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10339-015-0740-7</pub-id>, PMID: <pub-id pub-id-type="pmid">26445880</pub-id></citation></ref>
<ref id="ref70"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Slater</surname> <given-names>J.</given-names></name> <name><surname>Skoe</surname> <given-names>E.</given-names></name> <name><surname>Strait</surname> <given-names>D. L.</given-names></name> <name><surname>O'Connell</surname> <given-names>S.</given-names></name> <name><surname>Thompson</surname> <given-names>E.</given-names></name> <name><surname>Kraus</surname> <given-names>N.</given-names></name></person-group> (<year>2015</year>). <article-title>Music training improves speech-in-noise perception: longitudinal evidence from a community-based music program</article-title>. <source>Behav. Brain Res.</source> <volume>291</volume>, <fpage>244</fpage>&#x2013;<lpage>252</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bbr.2015.05.026</pub-id>, PMID: <pub-id pub-id-type="pmid">26005127</pub-id></citation></ref>
<ref id="ref71"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Stenb&#x00E4;ck</surname> <given-names>V.</given-names></name> <name><surname>Marsja</surname> <given-names>E.</given-names></name> <name><surname>H&#x00E4;llgren</surname> <given-names>M.</given-names></name> <name><surname>Lyxell</surname> <given-names>B.</given-names></name> <name><surname>Larsby</surname> <given-names>B.</given-names></name></person-group> (<year>2022</year>). <article-title>Informational masking and listening effort in speech recognition in noise: the role of working memory capacity and inhibitory control in older adults with and without hearing impairment</article-title>. <source>J. Speech Lang. Hear. Res.</source> <volume>65</volume>, <fpage>4417</fpage>&#x2013;<lpage>4428</lpage>. doi: <pub-id pub-id-type="doi">10.1044/2022_JSLHR-21-00674</pub-id>, PMID: <pub-id pub-id-type="pmid">36283680</pub-id></citation></ref>
<ref id="ref72"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Swaminathan</surname> <given-names>J.</given-names></name> <name><surname>Mason</surname> <given-names>C. R.</given-names></name> <name><surname>Streeter</surname> <given-names>T. M.</given-names></name> <name><surname>Best</surname> <given-names>V.</given-names></name> <name><surname>Kidd</surname> <given-names>G.</given-names> <suffix>Jr.</suffix></name> <name><surname>Patel</surname> <given-names>A. D.</given-names></name></person-group> (<year>2015</year>). <article-title>Musical training, individual differences and the cocktail party problem</article-title>. <source>Sci. Rep.</source> <volume>5</volume>:<fpage>11628</fpage>. doi: <pub-id pub-id-type="doi">10.1038/srep11628</pub-id>, PMID: <pub-id pub-id-type="pmid">26112910</pub-id></citation></ref>
<ref id="ref73"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Talamini</surname> <given-names>F.</given-names></name> <name><surname>Alto&#x00E8;</surname> <given-names>G.</given-names></name> <name><surname>Carretti</surname> <given-names>B.</given-names></name> <name><surname>Grassi</surname> <given-names>M.</given-names></name></person-group> (<year>2017</year>). <article-title>Musicians have better memory than nonmusicians: a meta-analysis</article-title>. <source>PLoS One</source> <volume>12</volume>:<fpage>e0186773</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0186773</pub-id>, PMID: <pub-id pub-id-type="pmid">29049416</pub-id></citation></ref>
<ref id="ref74"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Thompson</surname> <given-names>S. K.</given-names></name> <name><surname>Carlyon</surname> <given-names>R. P.</given-names></name> <name><surname>Cusack</surname> <given-names>R.</given-names></name></person-group> (<year>2011</year>). <article-title>An objective measurement of the build-up of auditory streaming and of its modulation by attention</article-title>. <source>J. Exp. Psychol. Hum. Percept. Perform.</source> <volume>37</volume>, <fpage>1253</fpage>&#x2013;<lpage>1262</lpage>. doi: <pub-id pub-id-type="doi">10.1037/a0021925</pub-id>, PMID: <pub-id pub-id-type="pmid">21480747</pub-id></citation></ref>
<ref id="ref75"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wingfield</surname> <given-names>A.</given-names></name> <name><surname>Tun</surname> <given-names>P. A.</given-names></name></person-group> (<year>2007</year>). <article-title>Cognitive supports and cognitive constraints on comprehension of spoken language</article-title>. <source>J. Am. Acad. Audiol.</source> <volume>18</volume>, <fpage>548</fpage>&#x2013;<lpage>558</lpage>. doi: <pub-id pub-id-type="doi">10.3766/jaaa.18.7.3</pub-id></citation></ref>
<ref id="ref76"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Worschech</surname> <given-names>F.</given-names></name> <name><surname>Marie</surname> <given-names>D.</given-names></name> <name><surname>J&#x00FC;nemann</surname> <given-names>K.</given-names></name> <name><surname>Sinke</surname> <given-names>C.</given-names></name> <name><surname>Kr&#x00FC;ger</surname> <given-names>T. H. C.</given-names></name> <name><surname>Gro&#x00DF;bach</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Improved speech in noise perception in the elderly after 6 months of musical instruction</article-title>. <source>Front. Neurosci.</source> <volume>15</volume>:<fpage>696240</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2021.696240</pub-id>, PMID: <pub-id pub-id-type="pmid">34305522</pub-id></citation></ref>
<ref id="ref77"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yamasoba</surname> <given-names>T.</given-names></name> <name><surname>Lin</surname> <given-names>F. R.</given-names></name> <name><surname>Someya</surname> <given-names>S.</given-names></name> <name><surname>Kashio</surname> <given-names>A.</given-names></name> <name><surname>Sakamoto</surname> <given-names>T.</given-names></name> <name><surname>Kondo</surname> <given-names>K.</given-names></name></person-group> (<year>2013</year>). <article-title>Current concepts in age-related hearing loss: epidemiology and mechanistic pathways</article-title>. <source>Hear. Res.</source> <volume>303</volume>, <fpage>30</fpage>&#x2013;<lpage>38</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.heares.2013.01.021</pub-id>, PMID: <pub-id pub-id-type="pmid">23422312</pub-id></citation></ref>
<ref id="ref78"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yoo</surname> <given-names>J.</given-names></name> <name><surname>Bidelman</surname> <given-names>G. M.</given-names></name></person-group> (<year>2019</year>). <article-title>Linguistic, perceptual, and cognitive factors underlying musicians' benefits in noise-degraded speech perception</article-title>. <source>Hear. Res.</source> <volume>377</volume>, <fpage>189</fpage>&#x2013;<lpage>195</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.heares.2019.03.021</pub-id>, PMID: <pub-id pub-id-type="pmid">30978607</pub-id></citation></ref>
<ref id="ref79"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yurgil</surname> <given-names>K. A.</given-names></name> <name><surname>Velasquez</surname> <given-names>M. A.</given-names></name> <name><surname>Winston</surname> <given-names>J. L.</given-names></name> <name><surname>Reichman</surname> <given-names>N. B.</given-names></name> <name><surname>Colombo</surname> <given-names>P. J.</given-names></name></person-group> (<year>2020</year>). <article-title>Music training, working memory, and neural oscillations: a review</article-title>. <source>Front. Psychol.</source> <volume>11</volume>:<fpage>266</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2020.00266</pub-id>, PMID: <pub-id pub-id-type="pmid">32153474</pub-id></citation></ref>
<ref id="ref9003"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zatorre</surname> <given-names>R. J.</given-names></name></person-group> (<year>2013</year>). <article-title>Predispositions and plasticity in music and speech learning: neural correlates and implications</article-title>. <source>Science</source> <volume>342</volume>, <fpage>585</fpage>&#x2013;<lpage>589</lpage>. doi: <pub-id pub-id-type="doi">10.1126/science.1238414</pub-id>, PMID: <pub-id pub-id-type="pmid">24179219</pub-id></citation></ref>
<ref id="ref80"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Zatorre</surname> <given-names>R.</given-names></name></person-group> (<year>2024</year>). <source>From perception to pleasure: the neuroscience of music and why we love it</source>. <edition>online</edition> Edn. <publisher-loc>New York</publisher-loc>: <publisher-name>Oxford Academic</publisher-name>.</citation></ref>
<ref id="ref81"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zatorre</surname> <given-names>R. J.</given-names></name> <name><surname>Halpern</surname> <given-names>A. R.</given-names></name> <name><surname>Bouffard</surname> <given-names>M.</given-names></name></person-group> (<year>2010</year>). <article-title>Mental reversal of imagined melodies: a role for the posterior parietal cortex</article-title>. <source>J. Cogn. Neurosci.</source> <volume>22</volume>, <fpage>775</fpage>&#x2013;<lpage>789</lpage>. doi: <pub-id pub-id-type="doi">10.1162/jocn.2009.21239</pub-id>, PMID: <pub-id pub-id-type="pmid">19366283</pub-id></citation></ref>
<ref id="ref82"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zendel</surname> <given-names>B. R.</given-names></name> <name><surname>West</surname> <given-names>G. L.</given-names></name> <name><surname>Belleville</surname> <given-names>S.</given-names></name> <name><surname>Peretz</surname> <given-names>I.</given-names></name></person-group> (<year>2019</year>). <article-title>Musical training improves the ability to understand speech-in-noise in older adults</article-title>. <source>Neurobiol. Aging</source> <volume>81</volume>, <fpage>102</fpage>&#x2013;<lpage>115</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neurobiolaging.2019.05.015</pub-id>, PMID: <pub-id pub-id-type="pmid">31280114</pub-id></citation></ref>
<ref id="ref83"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>L.</given-names></name> <name><surname>Fu</surname> <given-names>X.</given-names></name> <name><surname>Luo</surname> <given-names>D.</given-names></name> <name><surname>Xing</surname> <given-names>L.</given-names></name> <name><surname>Du</surname> <given-names>Y.</given-names></name></person-group> (<year>2021</year>). <article-title>Musical experience offsets age-related decline in understanding speech-in-noise: type of training does not matter, working memory is the key</article-title>. <source>Ear Hear.</source> <volume>42</volume>, <fpage>258</fpage>&#x2013;<lpage>270</lpage>. doi: <pub-id pub-id-type="doi">10.1097/AUD.0000000000000921</pub-id>, PMID: <pub-id pub-id-type="pmid">32826504</pub-id></citation></ref>
<ref id="ref84"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zuk</surname> <given-names>J.</given-names></name> <name><surname>Andrade</surname> <given-names>P. E.</given-names></name> <name><surname>Andrade</surname> <given-names>O. V.</given-names></name> <name><surname>Gardiner</surname> <given-names>M.</given-names></name> <name><surname>Gaab</surname> <given-names>N.</given-names></name></person-group> (<year>2013</year>). <article-title>Musical, language, and reading abilities in early Portuguese readers</article-title>. <source>Front. Psychol.</source> <volume>4</volume>:<fpage>288</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2013.00288</pub-id>, PMID: <pub-id pub-id-type="pmid">23785339</pub-id></citation></ref>
</ref-list>
</back>
</article>