<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" article-type="brief-report">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neurosci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neurosci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1662-453X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnins.2025.1605800</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Brief Research Report</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Figure&#x02013;ground relationship of voices in musical structure modulates reciprocal frontotemporal connectivity</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Kim</surname> <given-names>Chan Hee</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<uri xlink:href="https://loop.frontiersin.org/people/1235086"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Seo</surname> <given-names>Jeong-Eun</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Seol</surname> <given-names>Jaeho</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<uri xlink:href="https://loop.frontiersin.org/people/14669"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Chung</surname> <given-names>Chun Kee</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<uri xlink:href="https://loop.frontiersin.org/people/97472"/>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Department of Physiology and Neuroscience, Dental Research Institute, Seoul National University School of Dentistry</institution>, <city>Seoul</city>, <country country="kr">Republic of Korea</country></aff>
<aff id="aff2"><label>2</label><institution>Human Brain Function Laboratory, Neuroscience Research Institute, Seoul National University</institution>, <city>Seoul</city>, <country country="kr">Republic of Korea</country></aff>
<aff id="aff3"><label>3</label><institution>Department of Musicology, Seoul National University</institution>, <city>Seoul</city>, <country country="kr">Republic of Korea</country></aff>
<aff id="aff4"><label>4</label><institution>Neuroscience Research Institute, Seoul National University Medical Research Center</institution>, <city>Seoul</city>, <country country="kr">Republic of Korea</country></aff>
<author-notes>
<corresp id="c001"><label>&#x0002A;</label>Correspondence: Chan Hee Kim, <email xlink:href="mailto:chanypia@snu.ac.kr">chanypia@snu.ac.kr</email>; Chun Kee Chung, <email xlink:href="mailto:chungc@snu.ac.kr">chungc@snu.ac.kr</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-01-13">
<day>13</day>
<month>01</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2025</year>
</pub-date>
<volume>19</volume>
<elocation-id>1605800</elocation-id>
<history>
<date date-type="received">
<day>04</day>
<month>04</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>13</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>22</day>
<month>12</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2026 Kim, Seo, Seol and Chung.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Kim, Seo, Seol and Chung</copyright-holder>
<license>
<ali:license_ref start_date="2026-01-13">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>When listening to polyphonic music, we often perceive a melody as the figure against the ground of accompanying sounds. However, with repeated exposure, this figure&#x02013;ground relationship may naturally shift, allowing the melody to recede into the ground. In a previous study, we found the consistent pattern of frontotemporal connectivity for the &#x0201C;Twinkle, Twinkle, Little Star&#x0201D; (TTLS) melody in the headings of two <italic>Variations (II</italic> and <italic>IV)</italic> in Mozart&#x00027;s 12 Variations, K. 265, indicating that the TTLS melody, but not the different lower voices, was the figure. However, the frontotemporal connectivity pattern may change in the same phrases repeating in the two variations. In the current study, we examined how frontotemporal connectivity changes in the repeated phrases. In the results, the frontotemporal connectivity pattern between the two variations changed in the final phrase after repeated passages. This suggests that the shift in the figure&#x02013;ground relationship persists, with the TTLS melody becoming less prominent while the lower voices become relatively more prominent. Additionally, frontotemporal connectivity was strongly correlated with temporofrontal connectivity in the opposite direction. Finally, our data indicate that TTLS melody-based and sensory-based processes in response to a switched figure&#x02013;ground relationship, are incorporated into the bidirectional connections between frontotemporal and temporofrontal connectivity. Our study highlights the brain&#x00027;s ability to reconfigure figure&#x02013;ground relationships in the processing of musical voices.</p></abstract>
<kwd-group>
<kwd>figure&#x02013;ground perception</kwd>
<kwd>musical structure</kwd>
<kwd>musical voices</kwd>
<kwd>effective connectivity</kwd>
<kwd>inferior frontal gyrus</kwd>
<kwd>superior temporal gyrus</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This research was supported by Samsung Research Funding &#x00026; Incubation Center for Future Technology (SRFC-IT1902-08, Decoding Inner Music Using Electrocorticography), and Basic Science Research Program through the National Research Foundation of Korea (NRF) funded by the Ministry of Science &#x00026; ICT (NRF-2021R1A4A200180312) and the Ministry of Education (RS-2022-NR075566).</funding-statement>
</funding-group>
<counts>
<fig-count count="3"/>
<table-count count="0"/>
<equation-count count="0"/>
<ref-count count="75"/>
<page-count count="9"/>
<word-count count="7413"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Perception Science</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>Introduction</title>
<p>Humans can identify a specific melody within homophonic and polyphonic music because it often appears in a higher pitch range, making it easily distinguishable (<xref ref-type="bibr" rid="B13">Fujioka et al., 2005</xref>; <xref ref-type="bibr" rid="B64">Trainor et al., 2014</xref>). This phenomenon aligns with the figure&#x02013;ground concept in Gestalt psychology (<xref ref-type="bibr" rid="B23">K&#x000F6;hler, 1967</xref>; <xref ref-type="bibr" rid="B70">Wagemans et al., 2012</xref>). Similar to a visual stimulus (<xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 1</xref>), the melody can serve as the &#x0201C;figure,&#x0201D; while other voices constitute the &#x0201C;ground.&#x0201D; Additionally, listeners can sometimes shift their attention during a phrase, perceiving the melody as the background while other voices become dominant instead (<xref ref-type="bibr" rid="B42">Ragert et al., 2014</xref>; <xref ref-type="bibr" rid="B11">Deutch, 2019</xref>). However, even when the figure&#x02013;ground relationship favors the figure as the more perceptually dominant voice, the ground can remain perceptible (<xref ref-type="bibr" rid="B5">Bigand et al., 2000</xref>), and vice versa.</p>
<p>Musical structure, including pitch, tonality, and harmony, is learned through experience, and understanding of musical structure facilitates the recognition and anticipation of patterns in familiar pieces (<xref ref-type="bibr" rid="B30">Narmour, 2000</xref>; <xref ref-type="bibr" rid="B61">Tillmann et al., 2000</xref>). When a familiar melody appears in a musical piece, it is easily recognized as the figure in the musical structure. However, repeated exposure to the melody may alter the figure&#x02013;ground relationship between the upper and lower voices (<xref ref-type="bibr" rid="B56">Taher et al., 2016</xref>), and this change may eventually lead to the natural collapse of the figure&#x02013;ground relationship centered on the upper voice of the familiar melody.</p>
<p>In our previous study using Mozart&#x00027;s 12 Variations, K. 265 (<xref ref-type="bibr" rid="B22">Kim et al., 2020</xref>), we observed that only frontotemporal connectivity between the left Heschl&#x00027;s gyrus (HG) and left inferior frontal gyrus (IFG) changed in response to the presence or absence of the &#x0201C;Twinkle, Twinkle, Little Star&#x0201D; (TTLS) melody of &#x0201C;C5-C5-G5-G5-A5.&#x0201D; This connectivity pattern for the TTLS was observed across a target phrase (T) of 2.1 s at the beginning of each variation. However, if the figure&#x02013;ground relationship shifts after repetitions, the connectivity strength can become inconsistent (<xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 1C</xref>). The present study examined how frontotemporal connectivity for <italic>Variations II</italic> and <italic>IV</italic> (<xref ref-type="fig" rid="F1">Figure 1</xref>) changes across four target phrases (T1&#x02013;T4) featuring the TTLS melody. We hypothesized that: (1) If the connectivity pattern does not differ significantly between <italic>Variations II</italic> and <italic>IV</italic>, the TTLS melody remains the figure, with the lower voice serving as the ground; and (2) If the connectivity pattern differs significantly after repetitions, the TTLS melody may not be the sole figure, as the lower voices influence its prominence.</p>
<fig position="float" id="F1">
<label>Figure 1</label>
<caption><p>Musical stimuli. <bold>(A)</bold> In Mozart&#x00027;s 12 Variations, K. 265, the TTLS melody in the theme is modified in <italic>Variations I</italic> and <italic>III</italic> but not in <italic>Variations II</italic> or <italic>IV</italic>. The rhythmic appearance is the same within the pair of <italic>Variations I</italic> and <italic>II</italic> or that of <italic>Variations III</italic> and <italic>IV</italic>. <bold>(B)</bold> The left panel illustrates the structure of each variation involving repeat signs on the score. The right panel depicts the entire structure, comprising 48 measures of A (a &#x0002B; a) &#x0002B; B (b &#x0002B; a&#x00027;) &#x0002B; B (b &#x0002B; a&#x00027;), as it is played. White and black squares denote TTLS and cue melodies, respectively, repeated four times per variation. <bold>(C)</bold> The target phrases (T1-T4) are highlighted using green and orange shaded boxes with the &#x0201C;C5-C5-G5-G5-A5&#x0201D; melody marked with white-lined stars. Both <italic>Variations II</italic> and <italic>IV</italic> have two streams of upper and lower voices. The lower voices in the target phrases show melodic and rhythmic variations on the theme, but the upper voice remains consistent. Details about the formation of the adapted and the full scores are shown in <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 3</xref>. Musical scores of <italic>Variation II</italic> and <italic>IV</italic> were adapted from NMA Online: Neue Mozart-Ausgabe: Digitized Version (<ext-link ext-link-type="uri" xlink:href="https://dme.mozarteum.at/DME/nma/nmapub_srch.php?l=2">https://dme.mozarteum.at/DME/nma/nmapub_srch.php?l=2</ext-link>). TTLS, Twinkle Twinkle Little Star; T, target phrase.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-19-1605800-g0001.tif">
<alt-text content-type="machine-generated">Diagram illustrating musical structure and variations. Part A shows progression from Theme to Variations I-IV with upper and lower voice modifications. Part B details musical measures with stars denoting melody and squares as cue melody across total 48 measures, labeled T1-T4. Part C visually represents Variations II and IV with musical notes and corresponding schematic showing upper and lower voice patterns using stars (melody) and squares (cue). Variations II highlighted in green and IV in yellow.</alt-text>
</graphic>
</fig>
</sec>
<sec sec-type="materials|methods" id="s2">
<title>Materials and methods</title>
<sec>
<title>Participants</title>
<p>In magnetoencephalography (MEG) recording, participants comprised 25 healthy individuals, all non-musicians, 15 women and 10 men with a mean age of 26.8 &#x000B1; 3.4 years old. None had received formal musical training. All participants were right-handed, with a mean Edinburgh Handedness coefficient of 95.7 &#x000B1; 7.1. The study adhered to the principles of the Declaration of Helsinki and received approval from the Institutional Review Board of the Clinical Research Institute at Seoul National University Hospital (IRB No. C-1003-015-311). The research procedures adhered to relevant ethical guidelines and regulations. All participants provided informed, written consent after receiving a clear explanation of the study&#x00027;s purpose, procedures, potential risks, and benefits.</p>
</sec>
<sec>
<title>Stimuli</title>
<p>Mozart&#x00027;s K. 265 consists of the theme &#x0201C;<italic>Ah! Vous dirai-je Maman</italic>&#x0201D; and 12 variations (<xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 2</xref>). <italic>Variations I&#x02013;IV</italic> contain rhythmic, melodic, and textural variations on the theme. Relative to the theme, the rhythmic patterns in the upper voices are transformed in <italic>Variations I</italic> and <italic>III</italic> and moved to the lower voices in <italic>Variations II</italic> and <italic>IV</italic>, sharing the TTLS melody (<xref ref-type="fig" rid="F1">Figure 1A</xref>). This study focused on <italic>Variations II</italic> and <italic>IV</italic>, which share the TTLS melody but have differing lower parts, transforming through rhythmic changes to 8th note triplets and 16th notes (semiquavers), respectively. The tonality and harmonic structure remain the same for both variations. Each variation is based on the ternary form of A (a &#x0002B; a) &#x0002B; B (b &#x0002B; a&#x00027;) &#x0002B; B (b &#x0002B; a&#x00027;). Phrases, including the &#x0201C;C5-C5-G5-G5-A5&#x0201D; melody, were repeated four times in each variation (<xref ref-type="fig" rid="F1">Figure 1B</xref>). In this study, the term &#x0201C;<italic>variation</italic>&#x0201D; refers both to the musical form and to individual movements within that form, such as <italic>Variation II</italic> and <italic>Variation IV</italic>.</p>
</sec>
<sec>
<title>Recording</title>
<p>In a magnetically shielded room, the participants listened to Mozart&#x00027;s K. 265 while watching a silent movie clip (Love Actually, 2003, Universal Pictures, USA) for approximately 5 min. Musical stimuli were generated using STIM<sup>2&#x02122;</sup> (Neuroscan, Charlotte, NC, USA) and presented binaurally at 100 dB through MEG-compatible earphones (Tip-300, Nicolet, Madison, WI, USA). MEG signals were recorded using a 306-channel whole-head MEG system (Elekta Neuromag Vector View&#x02122;, Helsinki, Finland) with a sampling frequency of 1,000 Hz and a bandpass filter of 0.1&#x02013;200 Hz. The environmental magnetic noise in raw MEG signals was eliminated using the temporal signal space separation algorithm (<xref ref-type="bibr" rid="B60">Tesche et al., 1995</xref>; <xref ref-type="bibr" rid="B57">Taulu and Hari, 2009</xref>) implemented in MaxFilter 2.1.13 (Elekta Neuromag Oy, Helsinki, Finland). Electrooculogram, electrocardiogram, and muscle artifacts were also removed using independent component analysis. Participants did not perform behavioral tasks related to attentional shifts between voices during or after MEG recording. They also received no instructions regarding attentional focus during MEG recording.</p>
</sec>
<sec>
<title>Analysis</title>
<sec>
<title>MEG source analysis</title>
<p>The MEG source signals of epochs from &#x02212;100 to 2,100 ms after the onset of each condition for four regional sources of bilateral HGs and IFGs, bandpass filtered at 14&#x02013;30 Hz, were extracted using BESA 5.1.8.10 (MEGIS Software GmbH, Gr&#x000E4;felfing, Germany) after electrooculograms, electrocardiograms, and muscle artifacts were removed. Standard Talairach coordinates (x, y, and z in mm) for bilateral HGs (transverse, BA 41, BA 42) and IFGs (triangular part, BA 45) across participants were adapted from previous research (<xref ref-type="bibr" rid="B22">Kim et al., 2020</xref>). The coordinates were as follows: left HG (&#x02212;53.5, &#x02212;30.5, and 12.6), right HG (55.4, &#x02212;30.5, and 12.6), left IFG (&#x02212;55.5, 11.7, and 20.6), and right IFG (53.5, 12.7, and 20.6).</p></sec>
<sec>
<title>Time window</title>
<p><italic>Variations II</italic> and <italic>IV</italic> were chosen as the two conditions for estimating connectivity differences, including the identical TTLS melody. Each time window was 2,100 ms long, incorporating the &#x0201C;C5-C5-G5-G5-A5&#x0201D; melody, as in a previous study (<xref ref-type="bibr" rid="B22">Kim et al., 2020</xref>). Time windows of 2,100 ms appeared four times per variation, labeled as T1, T2, T3, and T4 (<xref ref-type="fig" rid="F1">Figure 1C</xref> and <xref ref-type="supplementary-material" rid="SM1">Supplementary Figures 2</xref>, <xref ref-type="supplementary-material" rid="SM1">3</xref>). The lower voices accompanied by the &#x0201C;C5-C5-G5-G5-A5&#x0201D; melody differed between the two variations.</p></sec>
<sec>
<title>LTDMI analysis</title>
<p>Effective connectivity across target phrases between the two variations was measured using linearized time-delayed mutual information (LTDMI; <xref ref-type="bibr" rid="B18">Jin et al., 2010</xref>; <xref ref-type="bibr" rid="B20">Kim et al., 2021</xref>), a measure used in our previous study (<xref ref-type="bibr" rid="B22">Kim et al., 2020</xref>). LTDMI estimates the directionality of information transmission between the time series of two regional sources, enabling the observation of interhemispheric and interregional connectivity, which is essential for processing musical elements in bilateral IFGs and HGs. While our primary focus was the regional connection from the left IFG to the right HG, we verified our results for all connections among the bilateral IFGs and HGs. The effective connectivity for the 12 connections between regional sources of the bilateral HGs and IFGs was estimated using MATLAB 7.7.0.471 (Math Works Inc., Natick, MA, USA; see also <xref ref-type="supplementary-material" rid="SM1">Supplementary Table 1</xref> for individual LTDMI values calculated for 12 connections). For each subject, the mean LTDMI for the 2,100-ms epoch was calculated for each of 4 target phrases (T1, T2, T3, and T4) &#x000D7; 2 variations (<italic>Variations II</italic> and <italic>IV</italic>).</p>
</sec>
</sec>
<sec>
<title>Statistics</title>
<p>Statistical comparisons of mean LTDMI values for <italic>Variations II</italic> and <italic>IV</italic> were performed using SPSS 21.0 software (IBM, Armonk, NY, USA). For the mean LTDMI values in four target phrases, we conducted the non-parametric Wilcoxon signed-rank test due to the non-Gaussian distribution of LTDMI data. In each case, the significance level (&#x003B1;) for rejecting the null hypothesis (H0, indicating no difference between <italic>Variation II</italic> and <italic>Variation IV</italic> in the mean LTDMI values), was 0.05. In addition, in the nonparametric Spearman correlation test for each pair between the frontotemporal connectivity difference value [Left IFG &#x02192; Right HG<sub>(Variation IV&#x02212;<italic>Variation II</italic>)</sub>] and the other 11 connectivity difference values, except for Left IFG &#x02192; Right HG among 12 connections between the bilateral IFGs and HGs, the Type I errors that were caused by multiple comparisons among the 11 connection pairs in the Spearman correlation test were adjusted by the Bonferroni test.</p></sec></sec>
<sec sec-type="results" id="s3">
<title>Results</title>
<sec>
<title>LTDMI differences between two variations for four target phrases</title>
<p>The frontotemporal connectivity from the left IFG to the right HG between <italic>Variations II</italic> and <italic>IV</italic> was calculated for four target phrases (T1&#x02013;T4), each repeated four times per variation (<xref ref-type="fig" rid="F1">Figure 1</xref>). We independently performed a Wilcoxon signed-rank test for the LTDMI values of each target phrase to confirm the changes in the frontotemporal connectivity between the two variations. The difference between the two variations was significant only in T4 among four target phrases, as indicated by the Wilcoxon signed-rank test (<italic>Z</italic> = &#x02212;2.112, <italic>P</italic> = 0.035; <xref ref-type="fig" rid="F2">Figure 2A</xref>). In T4, frontotemporal connectivity was enhanced in <italic>Variation IV</italic> compared with <italic>Variation II</italic>. However, significant differences were not observed in T1&#x02013;T3 (<italic>P</italic> &#x0003E; 0.05 in all cases). In addition, we confirmed that, among 12 connections between the bilateral IFGs and HGs, the only significant result corresponded specifically to frontotemporal connectivity from the left IFG to the right HG (<xref ref-type="supplementary-material" rid="SM1">Supplementary Table 1</xref>). We observed a near-significant effect in temporofrontal connectivity from the right HG to the left IFG, in the opposite direction of frontotemporal connectivity (Right HG &#x02192; Left IFG, <italic>Z</italic> = &#x02212;1.843, <italic>P</italic> = 0.065; <xref ref-type="fig" rid="F2">Figure 2A</xref> and <xref ref-type="supplementary-material" rid="SM1">Supplementary Table 1</xref>), which was not initially predicted in our hypothesis. The significance level (&#x003B1;) for the null hypothesis was independently tested for each target phrase (T1&#x02013;T4), since the target phrases of T1&#x02013;T4 existed in completely different musical contexts within the formal structure of ternary form. 
Additionally, comparisons between target phrases within a variation were not considered as a hypothesis.</p>
<fig position="float" id="F2">
<label>Figure 2</label>
<caption><p>Changes in LTDMI values for four target phrases. <bold>(A)</bold> Frontotemporal connectivity of <italic>Variation IV</italic> was significantly enhanced compared with that of <italic>Variation II</italic> only during T4 (Wilcoxon signed-rank test, <italic>Z</italic> = &#x02212;2.112, <italic>P</italic> = 0.035). The temporofrontal connectivity (right HG &#x02192; left IFG) of <italic>Variation IV</italic> was also enhanced relative to that of <italic>Variation II</italic> only at T4, although this did not reach the level of statistical significance (Wilcoxon signed-rank test, <italic>Z</italic> = &#x02212;1.843, <italic>P</italic> = 0.065). For both frontotemporal and temporofrontal connectivity, there were no significant differences from T1 to T3 (<italic>P</italic> &#x0003E; 0.05 in all cases; Wilcoxon signed-rank test). Error bar denotes the standard error mean. &#x0002A;, <italic>P</italic> &#x0003C; 0.05; &#x0002B;, <italic>P</italic> = 0.065. <bold>(B)</bold> Frontotemporal connectivity (Left IFG &#x02192; Right HG) was strongly positively correlated with the temporofrontal connectivity (Right HG &#x02192; Left IFG) only during T4 (<xref ref-type="supplementary-material" rid="SM1">Supplementary Table 2</xref>). There was a significant correlation between Left IFG &#x02192; Right HG<sub>(Variation IV&#x02212;<italic>Variation II</italic>)</sub> and Right HG &#x02192; Left IFG<sub>(Variation IV&#x02212;<italic>Variation II</italic>)</sub> at T4 (Spearman correlation, Spearman&#x00027;s rho = 0.759, Bonferroni-corrected <italic>P</italic> = 0.0001). There were no significant differences from T1 to T3 (<italic>P</italic> &#x0003E; 0.05 in all cases; Spearman correlation; see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 4</xref>). &#x0201C;<italic>Variation IV</italic>&#x02013;<italic>Variation II</italic>&#x0201D; denotes a difference between <italic>Variation IV</italic> and <italic>Variation II</italic> for the LTDMI value. 
LTDMI, linearized time delayed mutual information; HG, Heschl&#x00027;s gyrus; IFG, inferior frontal gyrus; T, target phrase.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-19-1605800-g0002.tif">
<alt-text content-type="machine-generated">Diagram featuring two parts, A and B. Part A shows brain illustrations and bar charts comparing LTDMI from the left IFG to the right HG and from the right HG to the left IFG across four time points (T1&#x02013;T4). Part B shows a scatter plot illustrating the correlation between LTDMI values from the left IFG to the right HG and from the right HG to the left IFG in the target phrase of T4, with a Spearman&#x02019;s rho of 0.759. Non-significant results in target phrases T1&#x02013;T3 are marked with gray lined bars.</alt-text>
</graphic>
</fig>
</sec>
<sec>
<title>Correlation between frontotemporal and temporofrontal connectivity</title>
<p>Correlation analyses were conducted to confirm (1) whether a similar pattern between frontotemporal and temporofrontal connectivity refers to bidirectional information transmission between the left IFG and the right HG and (2) whether a similar pattern is only specialized in the temporofrontal connectivity (Right HG &#x02192; Left IFG) among 12 connections between the bilateral IFGs and HGs, which are key areas for the music process. To perform this estimation, we first computed the difference values between <italic>Variations II</italic> and <italic>Variation IV</italic> for the LTDMI values in 12 connections between the bilateral IFGs and HGs for all target phrases of T1&#x02013;T4. Next, we estimated the correlation between the frontotemporal connectivity difference value [Left IFG &#x02192; Right HG<sub>(Variation IV&#x02212;<italic>Variation II</italic>)</sub>], with 11 other connectivity difference values. In the Spearman correlation test result, a significant correlation was only observed between Left IFG &#x02192; Right HG<sub>(Variation IV&#x02212;<italic>Variation II</italic>)</sub> and Right HG &#x02192; Left IFG<sub>(Variation IV&#x02212;<italic>Variation II</italic>)</sub> for T4, among 44 combinations of 11 connections &#x000D7; 4 target phrases (Spearman&#x00027;s rho = 0.759, Bonferroni-corrected <italic>P</italic> = 0.0001; <xref ref-type="fig" rid="F2">Figure 2B</xref>, <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 4</xref>, and <xref ref-type="supplementary-material" rid="SM1">Supplementary Table 2</xref>). The frontotemporal connectivity (Left IFG &#x02192; Right HG) was strongly positively correlated with the temporofrontal connectivity (Right HG &#x02192; Left IFG), reflecting similar information processing in <italic>Variation II</italic> and <italic>IV</italic>.</p></sec>
</sec>
<sec sec-type="discussion" id="s4">
<title>Discussion</title>
<p>A difference in frontotemporal connectivity from the left IFG to the right HG between the two variations was only observed in the final target phrase of T4 and not in the preceding three phrases (<xref ref-type="fig" rid="F2">Figure 2A</xref>). As we hypothesized, frontotemporal connectivity showed inconsistency in the figure&#x02013;ground relationship between the two variations of the TTLS melody in a repeated phrase of T4. This indicates that the perceptual dominance of the TTLS melody in voice perception was weakened. Each variation included cue phrases such as &#x0201C;Up above the world&#x02026;,&#x0201D; predicting the recurrence of the TTLS melody (<xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 2</xref>). The two musical structures&#x02014;variation and ternary forms&#x02014;establish global and regional contexts, respectively. In each variation, T4 is introduced after the second iteration of the cue phrase within the regional ternary context, facilitating anticipation of melodic recurrence. Moreover, at the global level of the variation form, the same structure involving T4 is repeated in <italic>Variation II</italic> and <italic>Variation IV</italic>, further enhancing anticipatory processing. The training of the repeated upper voice and its perceptual prominence might have facilitated participants&#x00027; recognition of the lower voice (<xref ref-type="bibr" rid="B56">Taher et al., 2016</xref>). The LTDMI value was higher in <italic>Variation IV</italic> than in <italic>Variation II</italic> during T4. We interpret that the connectivity reduction in <italic>Variation II</italic> for T4 is attributable to the properties of its lower voice, which differed from those in <italic>Variation IV</italic> (<xref ref-type="fig" rid="F3">Figure 3</xref>). Our findings show that participants did not solely focus on the TTLS melody at T4 but could also detect other sounds.</p>
<fig position="float" id="F3">
<label>Figure 3</label>
<caption><p>Figure&#x02013;ground relationship between voices according to frontotemporal and temporofrontal connectivity changes. The figure shows how connectivity between the left IFG and the right HG changes from T1 to T4 when the same passages are repeated in each variation. Frontotemporal connectivity from the left IFG to the right HG, the TTLS connectivity, is consistent from T1 to T3, focusing on the figure of the upper voice. However, at T4, a different frontotemporal connectivity pattern is exhibited between <italic>Variations II</italic> and <italic>IV</italic>. This indicates that frontotemporal connectivity may no longer be related to the TTLS melody but rather to the lower voice, reflecting a shift away from TTLS-specific connectivity. Lower voices, masked by the perceptual dominance of the TTLS melody until T3, may have become audible alongside the TTLS melody at T4. However, because these results do not demonstrate whether figure and ground are perceptually separated or integrated, predictable bidirectional processes are represented using overlapping circles and stars. HG, Heschl&#x00027;s gyrus; IFG, inferior frontal gyrus; T, target phrase; TTLS, Twinkle Twinkle Little Star.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-19-1605800-g0003.tif">
<alt-text content-type="machine-generated">Diagram illustrating the structures of Variations II and IV, each with upper- and lower-voice sequences. Symbols include stars, squares, and circles connected by lines. Target phrases T1&#x02013;T4 are marked. Below, three sections labeled a, b, and c reference the left IFG and right HG brain regions, indicating neural activity patterns.</alt-text>
</graphic>
</fig>
<p>In our previous study (<xref ref-type="bibr" rid="B22">Kim et al., 2020</xref>), unidirectional transmission of information from the left IFG to the right HG, showing frontotemporal connectivity, was associated with the recognition of the TTLS melody in the heading of each variation. However, the involvement of temporofrontal connectivity, which was in the opposite direction to the frontotemporal connectivity, was observed in repeated phrases in which a musical context was evoked (<xref ref-type="fig" rid="F2">Figure 2B</xref>). The temporofrontal connectivity was strongly correlated with the frontotemporal connectivity in T4 (<xref ref-type="fig" rid="F2">Figure 2B</xref> and <xref ref-type="supplementary-material" rid="SM1">Supplementary Table 2</xref>). The top&#x02013;down processing of a familiar melody can significantly influence the figure&#x02013;ground relationship (<xref ref-type="bibr" rid="B53">Str&#x000FC;ber and Stadler, 1999</xref>; <xref ref-type="bibr" rid="B31">Nelson and Palmer, 2007</xref>). Considering that the roles of frontotemporal and temporofrontal connectivity are linked to the TTLS melody-based and sensory-based processes, respectively, this heightened connectivity could indicate a dual process: extracting the novel lower voice in the target phrase, including the familiar TTLS melody, and dissecting the components within the novel lower voices. The temporofrontal connectivity supports the frontotemporal connectivity. Thus, bidirectional connectivity of the frontotemporal and temporofrontal pathways between the left IFG and the right HG is possibly modulated by both a top&#x02013;down process based on knowledge of the TTLS melody and a bottom&#x02013;up process based on new information on the voices accumulated while sequentially listening to target and cue phrases in each variation (<xref ref-type="bibr" rid="B1">Alho et al., 2015</xref>; <xref ref-type="bibr" rid="B12">Dzafic et al., 2021</xref>).</p>
<p>Numerous studies on the auditory figure&#x02013;ground relationship have conducted auditory scene analysis (<xref ref-type="bibr" rid="B6">Bregman, 1994</xref>) and grouping (<xref ref-type="bibr" rid="B70">Wagemans et al., 2012</xref>) using tasks that discriminate a sound pattern as a figure from a ground of tone and chord sequences with irregularities in spectral and temporal properties (<xref ref-type="bibr" rid="B58">Teki et al., 2011</xref>; <xref ref-type="bibr" rid="B32">O&#x00027;Sullivan et al., 2015</xref>; <xref ref-type="bibr" rid="B63">Toth et al., 2016</xref>). Functional magnetic resonance imaging and electroencephalography studies (<xref ref-type="bibr" rid="B58">Teki et al., 2011</xref>; <xref ref-type="bibr" rid="B32">O&#x00027;Sullivan et al., 2015</xref>; <xref ref-type="bibr" rid="B63">Toth et al., 2016</xref>) have reported that regions involved in discriminating this figure&#x02013;ground perception comprise the primary auditory area, superior temporal sulcus, superior temporal gyrus, intraparietal sulcus, medial/superior frontal gyrus, and cingulate cortex. The processing of multiple voices in this study involved the IFG and the HG, both exhibiting information transmission. The left IFG is crucial in processing familiarity (<xref ref-type="bibr" rid="B37">Plailly et al., 2007</xref>) and in music-syntactic processing, indicating implicit learning (<xref ref-type="bibr" rid="B46">Sammler et al., 2011</xref>) and contributing to memory retrieval (<xref ref-type="bibr" rid="B38">Platel et al., 2003</xref>; <xref ref-type="bibr" rid="B71">Watanabe et al., 2008</xref>). Moreover, the left IFG is involved in differentiating between melody and accompaniment (<xref ref-type="bibr" rid="B51">Spada et al., 2014</xref>). 
The left IFG is highly activated during the recognition of pattern deviations in melody (<xref ref-type="bibr" rid="B14">Habermeyer et al., 2009</xref>), musical novelty in a context (<xref ref-type="bibr" rid="B62">Tillmann et al., 2003</xref>), and conscious experience (<xref ref-type="bibr" rid="B72">Weilnhammer et al., 2021</xref>). The left IFG is also associated with semantic and syntactic processing (<xref ref-type="bibr" rid="B75">Zhu et al., 2022</xref>) and sentence comprehension (<xref ref-type="bibr" rid="B68">van der Burght et al., 2019</xref>). In contrast, the right HG predominantly processes tone deviance (<xref ref-type="bibr" rid="B45">Sabri et al., 2006</xref>; <xref ref-type="bibr" rid="B29">Nan and Friederici, 2013</xref>) and the segregation of auditory streams (<xref ref-type="bibr" rid="B50">Snyder et al., 2006</xref>). The right auditory cortex is the dominant site for music processing (<xref ref-type="bibr" rid="B35">Perani et al., 2010</xref>), auditory stream segregation (<xref ref-type="bibr" rid="B50">Snyder et al., 2006</xref>), and spectral pitch (<xref ref-type="bibr" rid="B47">Schneider et al., 2005</xref>).</p>
<p>The IFG and HG are pivotal areas for music perception, and their connectivity is discussed in relation to syntax processes (<xref ref-type="bibr" rid="B34">Papoutsi et al., 2011</xref>; <xref ref-type="bibr" rid="B21">Kim et al., 2019</xref>, <xref ref-type="bibr" rid="B20">2021</xref>), categorization (<xref ref-type="bibr" rid="B43">Roswandowitz et al., 2021</xref>), and the working memory of the melody process (<xref ref-type="bibr" rid="B7">Burunat et al., 2014</xref>). The temporofrontal network is engaged in the categorization of vocal signals (<xref ref-type="bibr" rid="B43">Roswandowitz et al., 2021</xref>), while frontotemporal connection is involved in both top-down and bottom-up processes in sensory learning (<xref ref-type="bibr" rid="B12">Dzafic et al., 2021</xref>). Directional information flows within frontotemporal connectivity may therefore explain how the left IFG and the right HG collaborate in processing target phrases. The involvement of the left IFG and right HG may reflect the entire process of naturally grasping, comparing, and understanding the voices in target phrases within a particular context rather than simply perceiving them as sounds. Accordingly, enhanced frontotemporal and temporofrontal connectivity may reflect the integrated processes by which the brain recognizes the TTLS melody based on memory, segregates the melody, and detects differences in the lower voices relative to the prior context.</p>
<p>Previous studies have selectively manipulated stimuli or directed participants&#x00027; attention to specific auditory streams (<xref ref-type="bibr" rid="B66">Uhlig et al., 2013</xref>; <xref ref-type="bibr" rid="B42">Ragert et al., 2014</xref>; <xref ref-type="bibr" rid="B51">Spada et al., 2014</xref>; <xref ref-type="bibr" rid="B52">Strait et al., 2015</xref>; <xref ref-type="bibr" rid="B15">Hausfeld et al., 2018</xref>; <xref ref-type="bibr" rid="B40">Puschmann et al., 2019</xref>; <xref ref-type="bibr" rid="B3">Barrett et al., 2021</xref>). Attention has been shown to be critical for figure&#x02013;ground perception (<xref ref-type="bibr" rid="B39">Poort et al., 2012</xref>). Therefore, research on such perception uses artificially composed stimuli to direct participants&#x00027; attention. During the MEG experiment in this study, all participants passively listened to the naturalistic music of Mozart&#x00027;s 12 Variations, K. 265, without any instructions regarding focusing their attention on a specific voice or melody. Thus, whether both figure and ground were processed attentively or pre-attentively remains unclear, given the absence of intentional attention control. Although listeners may focus on a particular voice while listening to music, the changing flow of music can encompass brief perceptible moments in which the figure&#x02013;ground relationship continuously shifts without listeners consciously realizing it. Indeed, music listeners can automatically process information such as syntactic errors and tone deviations without intentional attention (<xref ref-type="bibr" rid="B27">Maess et al., 2001</xref>; <xref ref-type="bibr" rid="B28">Naatanen et al., 2007</xref>). 
We interpreted that participants could attentively or pre-attentively detect sonic changes in the voices at that moment, although our data do not confirm that non-musicians could perceptually segregate the streams or identify which voice evoked the sonic differences (<xref ref-type="fig" rid="F3">Figure 3</xref>). Our results successfully captured the moment when the figure&#x02013;ground relationship between the upper and lower voices changed, as evidenced by the difference in frontotemporal connectivity for repeated phrases in the two variations and the correlation between frontotemporal and temporofrontal connectivity.</p>
<p>Familiarity is critical for explaining the figure&#x02013;ground experiment (<xref ref-type="bibr" rid="B33">Palmer, 1999</xref>; <xref ref-type="bibr" rid="B16">Hulleman and Humphreys, 2004</xref>; <xref ref-type="bibr" rid="B31">Nelson and Palmer, 2007</xref>). The target phrases in our stimuli involved the TTLS song, which has been used in studies related to the perception of familiar melodies (<xref ref-type="bibr" rid="B65">Trehub et al., 1985</xref>; <xref ref-type="bibr" rid="B67">Upitis, 1990</xref>; <xref ref-type="bibr" rid="B4">Besson et al., 1994</xref>; <xref ref-type="bibr" rid="B10">Creel, 2019</xref>). Familiarity would be naturally implied in the theme and all of its variations, considering that Mozart&#x00027;s 12 Variations, K. 265, is based on the TTLS melody. Logically, the familiarity implied in the TTLS melody might influence participants&#x00027; figure&#x02013;ground perception. However, in our previous study focusing on the TTLS melody (<xref ref-type="bibr" rid="B22">Kim et al., 2020</xref>), we could not directly prove the effect of familiarity on frontotemporal connectivity as the connectivity changed irrespective of the presence or absence of the TTLS melody. In our present study, the same TTLS melody appeared repeatedly in <italic>Variations II</italic> and <italic>IV</italic>. The effects of familiarity are consistent in both <italic>Variations II</italic> and <italic>IV</italic>. Thus, the TTLS melody was used to assess changes in the figure&#x02013;ground relationship.</p>
<p>Naturalistic stimuli have been used to examine various topics (<xref ref-type="bibr" rid="B44">Saarim&#x000E4;ki, 2021</xref>; <xref ref-type="bibr" rid="B17">Izen et al., 2023</xref>; <xref ref-type="bibr" rid="B59">Tervaniemi, 2023</xref>). In studies on the concepts of emotion (<xref ref-type="bibr" rid="B48">Singer et al., 2016</xref>; <xref ref-type="bibr" rid="B41">Putkinen et al., 2021</xref>), melodic expectation (<xref ref-type="bibr" rid="B19">Kern et al., 2022</xref>), temporal aspects of rhythm and beat (<xref ref-type="bibr" rid="B54">Sturm et al., 2015</xref>; <xref ref-type="bibr" rid="B73">Weineck et al., 2022</xref>), and familiarity (<xref ref-type="bibr" rid="B25">Leaver et al., 2009</xref>), multiple naturalistic pieces have been used as musical stimuli. Some studies using naturalistic stimuli have examined their hypotheses on topics such as motif, musical features, timbre, and depression, based on a single piece (<xref ref-type="bibr" rid="B2">Alluri et al., 2012</xref>; <xref ref-type="bibr" rid="B9">Cong et al., 2013</xref>; <xref ref-type="bibr" rid="B8">Burunat et al., 2016</xref>; <xref ref-type="bibr" rid="B26">Liu et al., 2020</xref>). Our hypothesis was also created for the melody of TTLS and the figure&#x02013;ground relationship of voices using Mozart&#x00027;s 12 Variations, K. 265. 
In the fields of audition (<xref ref-type="bibr" rid="B66">Uhlig et al., 2013</xref>; <xref ref-type="bibr" rid="B42">Ragert et al., 2014</xref>; <xref ref-type="bibr" rid="B15">Hausfeld et al., 2018</xref>; <xref ref-type="bibr" rid="B3">Barrett et al., 2021</xref>) and vision (<xref ref-type="bibr" rid="B36">Peterson et al., 1991</xref>; <xref ref-type="bibr" rid="B24">Lamme, 1995</xref>; <xref ref-type="bibr" rid="B55">Super et al., 2003</xref>; <xref ref-type="bibr" rid="B74">Zhang and Von Der Heydt, 2010</xref>; <xref ref-type="bibr" rid="B69">Von der Heydt, 2015</xref>), changes in the figure&#x02013;ground relationship between the elements of an object can be simply observed and explained by comparing the related objects. However, naturalistic music has its own narrative, which can be described by its structure. The connectivity reflects complicated processes for the target phrases of 2.1 s without the context and for each 2.1-s-long target phrase in the theme and variations, leading up to the target phrase. This approach, however, may constitute a critical shortcoming of our study, compared with measurements using artificially composed stimuli. Furthermore, no verbal reports or other measures were obtained during or after MEG recording to determine what participants perceived as the &#x0201C;figure&#x0201D; in the music at each moment, including critical time windows. As a result, connectivity changes were measured under naturalistic listening conditions in which participants passively listened to music. These limitations can be addressed in future studies using novel paradigms that incorporate detailed behavioral responses and larger sample sizes. Nevertheless, our results reflected humans&#x00027; ubiquitous experiences at the level of individual participants.</p>
<p>In addition to the use of naturalistic music, our study had other limitations. While non-musicians can perceive the figure separately from the ground (<xref ref-type="bibr" rid="B63">Toth et al., 2016</xref>), they tend to focus more on the upper voice than on a lower one (<xref ref-type="bibr" rid="B49">Sloboda and Edworthy, 1981</xref>). In contrast, musicians are more sensitive to voice perception and are better able to distinguish voices (<xref ref-type="bibr" rid="B13">Fujioka et al., 2005</xref>; <xref ref-type="bibr" rid="B52">Strait et al., 2015</xref>). Music training significantly influences selective attention (<xref ref-type="bibr" rid="B40">Puschmann et al., 2019</xref>). We did not recruit musicians as participants to examine our hypothesis in terms of basic musical ability. Recruiting non-musicians might have impacted our results. The temporofrontal connectivity may show a statistically significant distinction (<italic>P</italic> &#x0003C; 0.05) since the recognition of different elements in the lower voices (via temporofrontal connectivity) involves a more complex cognitive process. Therefore, future studies should verify these results with musicians. Furthermore, this study concentrated on frontal and temporal regions. Our findings should be verified at the whole-brain level. In the current experimental paradigm, we did not consider an additional test of subjects&#x00027; individual preferences. Preference also influences music listening and should be addressed in further studies with a novel experimental paradigm. The MEG recording and analysis approach used in this study may be replicable using other modalities, such as EEG. Despite these limitations, the use of Mozart&#x00027;s 12 Variations, K. 265, was invaluable in understanding the fundamental neural processes associated with processing real music featuring multiple voices and elucidating the human experience of music. 
Our findings elucidated how the brain dissects voices from the multidimensional structures of music and reconstructs the figure&#x02013;ground relationship between voices.</p></sec>
</body>
<back>
<sec sec-type="data-availability" id="s5">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec sec-type="ethics-statement" id="s6">
<title>Ethics statement</title>
<p>The studies involving humans were approved by Institutional Review Board of the Clinical Research Institute at Seoul National University Hospital (IRB No. C-1003-015-311). The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec sec-type="author-contributions" id="s7">
<title>Author contributions</title>
<p>CK: Conceptualization, Data curation, Formal analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing &#x02013; original draft, Writing &#x02013; review &#x00026; editing. J-ES: Methodology, Writing &#x02013; review &#x00026; editing. JS: Investigation, Writing &#x02013; review &#x00026; editing, Methodology. CC: Funding acquisition, Supervision, Writing &#x02013; review &#x00026; editing.</p>
</sec>
<ack><title>Acknowledgments</title><p>We sincerely appreciate Ji Hyang Nam for her technical support in MEG data acquisition.</p></ack>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s9">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was used in the creation of this manuscript. Furthermore, I acknowledge the use of ChatGPT (OpenAI Version 2) for assistance with language editing during the preparation of the original draft of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="s11">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fnins.2025.1605800/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fnins.2025.1605800/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Alho</surname> <given-names>K.</given-names></name> <name><surname>Salmi</surname> <given-names>J.</given-names></name> <name><surname>Koistinen</surname> <given-names>S.</given-names></name> <name><surname>Salonen</surname> <given-names>O.</given-names></name> <name><surname>Rinne</surname> <given-names>T.</given-names></name></person-group> (<year>2015</year>). <article-title>Top-down controlled and bottom-up triggered orienting of auditory attention to pitch activate overlapping brain networks</article-title>. <source>Brain Res.</source> <volume>1626</volume>, <fpage>136</fpage>&#x02013;<lpage>145</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.brainres.2014.12.050</pub-id><pub-id pub-id-type="pmid">25557401</pub-id></mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Alluri</surname> <given-names>V.</given-names></name> <name><surname>Toiviainen</surname> <given-names>P.</given-names></name> <name><surname>Jaaskelainen</surname> <given-names>I. P.</given-names></name> <name><surname>Glerean</surname> <given-names>E.</given-names></name> <name><surname>Sams</surname> <given-names>M.</given-names></name> <name><surname>Brattico</surname> <given-names>E.</given-names></name></person-group> (<year>2012</year>). <article-title>Large-scale brain networks emerge from dynamic processing of musical timbre, key and rhythm</article-title>. <source>Neuroimage</source> <volume>59</volume>, <fpage>3677</fpage>&#x02013;<lpage>3689</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2011.11.019</pub-id><pub-id pub-id-type="pmid">22116038</pub-id></mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Barrett</surname> <given-names>K. C.</given-names></name> <name><surname>Ashley</surname> <given-names>R.</given-names></name> <name><surname>Strait</surname> <given-names>D. L.</given-names></name> <name><surname>Skoe</surname> <given-names>E.</given-names></name> <name><surname>Limb</surname> <given-names>C. J.</given-names></name> <name><surname>Kraus</surname> <given-names>N.</given-names></name></person-group> (<year>2021</year>). <article-title>Multi-voiced music bypasses attentional limitations in the brain</article-title>. <source>Front. Neurosci.</source> <volume>15</volume>:<fpage>588914</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2021.588914</pub-id><pub-id pub-id-type="pmid">33584187</pub-id></mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Besson</surname> <given-names>M.</given-names></name> <name><surname>Fa&#x000EF;ta</surname> <given-names>F.</given-names></name> <name><surname>Requin</surname> <given-names>J.</given-names></name></person-group> (<year>1994</year>). <article-title>Brain waves associated with musical incongruities differ for musicians and non-musicians</article-title>. <source>Neurosci. Lett.</source> <volume>168</volume>, <fpage>101</fpage>&#x02013;<lpage>105</lpage>. doi: <pub-id pub-id-type="doi">10.1016/0304-3940(94)90426-X</pub-id><pub-id pub-id-type="pmid">8028758</pub-id></mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bigand</surname> <given-names>E.</given-names></name> <name><surname>McAdams</surname> <given-names>S.</given-names></name> <name><surname>For&#x000EA;t</surname> <given-names>S.</given-names></name></person-group> (<year>2000</year>). <article-title>Divided attention in music</article-title>. <source>Int. J. Psychol.</source> <volume>35</volume>, <fpage>270</fpage>&#x02013;<lpage>278</lpage>. doi: <pub-id pub-id-type="doi">10.1080/002075900750047987</pub-id></mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Bregman</surname> <given-names>A. S.</given-names></name></person-group> (<year>1994</year>). <source>Auditory Scene Analysis: The Perceptual Organization of Sound</source>. <publisher-loc>Cambridge, MA</publisher-loc>: <publisher-name>MIT Press</publisher-name>. doi: <pub-id pub-id-type="doi">10.1121/1.408434</pub-id></mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Burunat</surname> <given-names>I.</given-names></name> <name><surname>Alluri</surname> <given-names>V.</given-names></name> <name><surname>Toiviainen</surname> <given-names>P.</given-names></name> <name><surname>Numminen</surname> <given-names>J.</given-names></name> <name><surname>Brattico</surname> <given-names>E.</given-names></name></person-group> (<year>2014</year>). <article-title>Dynamics of brain activity underlying working memory for music in a naturalistic condition</article-title>. <source>Cortex</source> <volume>57</volume>, <fpage>254</fpage>&#x02013;<lpage>269</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cortex.2014.04.012</pub-id><pub-id pub-id-type="pmid">24949579</pub-id></mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Burunat</surname> <given-names>I.</given-names></name> <name><surname>Toiviainen</surname> <given-names>P.</given-names></name> <name><surname>Alluri</surname> <given-names>V.</given-names></name> <name><surname>Bogert</surname> <given-names>B.</given-names></name> <name><surname>Ristaniemi</surname> <given-names>T.</given-names></name> <name><surname>Sams</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>The reliability of continuous brain responses during naturalistic listening to music</article-title>. <source>Neuroimage</source> <volume>124</volume>, <fpage>224</fpage>&#x02013;<lpage>231</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2015.09.005</pub-id><pub-id pub-id-type="pmid">26364862</pub-id></mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cong</surname> <given-names>F.</given-names></name> <name><surname>Alluri</surname> <given-names>V.</given-names></name> <name><surname>Nandi</surname> <given-names>A. K.</given-names></name> <name><surname>Toiviainen</surname> <given-names>P.</given-names></name> <name><surname>Fa</surname> <given-names>R.</given-names></name> <name><surname>Abu-Jamous</surname> <given-names>B.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Linking brain responses to naturalistic music through analysis of ongoing EEG and stimulus features</article-title>. <source>IEEE Trans. Multimedia</source> <volume>15</volume>, <fpage>1060</fpage>&#x02013;<lpage>1069</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TMM.2013.2253452</pub-id></mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Creel</surname> <given-names>S. C.</given-names></name></person-group> (<year>2019</year>). <article-title>The familiar-melody advantage in auditory perceptual development: Parallels between spoken language acquisition and general auditory perception</article-title>. <source>Attention Percept. Psychophys.</source> <volume>81</volume>, <fpage>948</fpage>&#x02013;<lpage>957</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13414-018-01663-7</pub-id><pub-id pub-id-type="pmid">30635834</pub-id></mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Deutch</surname> <given-names>D.</given-names></name></person-group> (<year>2019</year>). <source>Chapter 3. The Perceptual Organization of Streams of Sound</source>. <publisher-loc>Oxford</publisher-loc>: <publisher-name>Oxford University Press</publisher-name>. doi: <pub-id pub-id-type="doi">10.1093/oso/9780190206833.003.0004</pub-id></mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dzafic</surname> <given-names>I.</given-names></name> <name><surname>Larsen</surname> <given-names>K. M.</given-names></name> <name><surname>Darke</surname> <given-names>H.</given-names></name> <name><surname>Pertile</surname> <given-names>H.</given-names></name> <name><surname>Carter</surname> <given-names>O.</given-names></name> <name><surname>Sundram</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Stronger top-down and weaker bottom-up frontotemporal connections during sensory learning are associated with severity of psychotic phenomena</article-title>. <source>Schizophr. Bull.</source> <volume>47</volume>, <fpage>1039</fpage>&#x02013;<lpage>1047</lpage>. doi: <pub-id pub-id-type="doi">10.1093/schbul/sbaa188</pub-id><pub-id pub-id-type="pmid">33404057</pub-id></mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fujioka</surname> <given-names>T.</given-names></name> <name><surname>Trainor</surname> <given-names>L. J.</given-names></name> <name><surname>Ross</surname> <given-names>B.</given-names></name> <name><surname>Kakigi</surname> <given-names>R.</given-names></name> <name><surname>Pantev</surname> <given-names>C.</given-names></name></person-group> (<year>2005</year>). <article-title>Automatic encoding of polyphonic melodies in musicians and nonmusicians</article-title>. <source>J. Cogn. Neurosci.</source> <volume>17</volume>, <fpage>1578</fpage>&#x02013;<lpage>1592</lpage>. doi: <pub-id pub-id-type="doi">10.1162/089892905774597263</pub-id><pub-id pub-id-type="pmid">16269098</pub-id></mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Habermeyer</surname> <given-names>B.</given-names></name> <name><surname>Herdener</surname> <given-names>M.</given-names></name> <name><surname>Esposito</surname> <given-names>F.</given-names></name> <name><surname>Hilti</surname> <given-names>C. C.</given-names></name> <name><surname>Klarhofer</surname> <given-names>M.</given-names></name> <name><surname>di Salle</surname> <given-names>F.</given-names></name> <etal/></person-group>. (<year>2009</year>). <article-title>Neural correlates of pre-attentive processing of pattern deviance in professional musicians</article-title>. <source>Hum. Brain Mapp.</source> <volume>30</volume>, <fpage>3736</fpage>&#x02013;<lpage>3747</lpage>. doi: <pub-id pub-id-type="doi">10.1002/hbm.20802</pub-id><pub-id pub-id-type="pmid">19492302</pub-id></mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hausfeld</surname> <given-names>L.</given-names></name> <name><surname>Riecke</surname> <given-names>L.</given-names></name> <name><surname>Valente</surname> <given-names>G.</given-names></name> <name><surname>Formisano</surname> <given-names>E.</given-names></name></person-group> (<year>2018</year>). <article-title>Cortical tracking of multiple streams outside the focus of attention in naturalistic auditory scenes</article-title>. <source>Neuroimage</source> <volume>181</volume>, <fpage>617</fpage>&#x02013;<lpage>626</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2018.07.052</pub-id><pub-id pub-id-type="pmid">30048749</pub-id></mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hulleman</surname> <given-names>J.</given-names></name> <name><surname>Humphreys</surname> <given-names>G. W.</given-names></name></person-group> (<year>2004</year>). <article-title>A new cue to figure&#x02013;ground coding: top&#x02013;bottom polarity</article-title>. <source>Vision Res.</source> <volume>44</volume>, <fpage>2779</fpage>&#x02013;<lpage>2791</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.visres.2004.06.012</pub-id><pub-id pub-id-type="pmid">15342222</pub-id></mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Izen</surname> <given-names>S. C.</given-names></name> <name><surname>Cassano-Coleman</surname> <given-names>R. Y.</given-names></name> <name><surname>Piazza</surname> <given-names>E. A.</given-names></name></person-group> (<year>2023</year>). <article-title>Music as a window into real-world communication</article-title>. <source>Front. Psychol.</source> <volume>14</volume>:<fpage>1012839</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2023.1012839</pub-id><pub-id pub-id-type="pmid">37496799</pub-id></mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jin</surname> <given-names>S. H.</given-names></name> <name><surname>Lin</surname> <given-names>P.</given-names></name> <name><surname>Hallett</surname> <given-names>M.</given-names></name></person-group> (<year>2010</year>). <article-title>Linear and nonlinear information flow based on time-delayed mutual information method and its application to corticomuscular interaction</article-title>. <source>Clin. Neurophysiol.</source> <volume>121</volume>, <fpage>392</fpage>&#x02013;<lpage>401</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.clinph.2009.09.033</pub-id><pub-id pub-id-type="pmid">20044309</pub-id></mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kern</surname> <given-names>P.</given-names></name> <name><surname>Heilbron</surname> <given-names>M.</given-names></name> <name><surname>de Lange</surname> <given-names>F. P.</given-names></name> <name><surname>Spaak</surname> <given-names>E.</given-names></name></person-group> (<year>2022</year>). <article-title>Cortical activity during naturalistic music listening reflects short-range predictions based on long-term experience</article-title>. <source>Elife</source> <volume>11</volume>:<fpage>e80935</fpage>. doi: <pub-id pub-id-type="doi">10.7554/eLife.80935.sa2</pub-id><pub-id pub-id-type="pmid">36562532</pub-id></mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>C. H.</given-names></name> <name><surname>Jin</surname> <given-names>S. H.</given-names></name> <name><surname>Kim</surname> <given-names>J. S.</given-names></name> <name><surname>Kim</surname> <given-names>Y.</given-names></name> <name><surname>Yi</surname> <given-names>S. W.</given-names></name> <name><surname>Chung</surname> <given-names>C. K.</given-names></name></person-group> (<year>2021</year>). <article-title>Dissociation of connectivity for syntactic irregularity and perceptual ambiguity in musical chord stimuli</article-title>. <source>Front. Neurosci.</source> <volume>15</volume>:<fpage>693629</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2021.693629</pub-id><pub-id pub-id-type="pmid">34526877</pub-id></mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>C. H.</given-names></name> <name><surname>Kim</surname> <given-names>J. S.</given-names></name> <name><surname>Choi</surname> <given-names>Y.</given-names></name> <name><surname>Kyong</surname> <given-names>J. S.</given-names></name> <name><surname>Kim</surname> <given-names>Y.</given-names></name> <name><surname>Yi</surname> <given-names>S. W.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Change in left inferior frontal connectivity with less unexpected harmonic cadence by musical expertise</article-title>. <source>PLoS ONE</source> <volume>14</volume>:<fpage>e0223283</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0223283</pub-id><pub-id pub-id-type="pmid">31714920</pub-id></mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>C. H.</given-names></name> <name><surname>Seol</surname> <given-names>J.</given-names></name> <name><surname>Jin</surname> <given-names>S.-H.</given-names></name> <name><surname>Kim</surname> <given-names>J. S.</given-names></name> <name><surname>Kim</surname> <given-names>Y.</given-names></name> <name><surname>Yi</surname> <given-names>S. W.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Increased fronto-temporal connectivity by modified melody in real music</article-title>. <source>PLoS ONE</source> <volume>15</volume>:<fpage>e0235770</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0235770</pub-id><pub-id pub-id-type="pmid">32639987</pub-id></mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>K&#x000F6;hler</surname> <given-names>W.</given-names></name></person-group> (<year>1967</year>). <article-title>Gestalt psychology</article-title>. <source>Psychologische Forschung</source> <volume>31</volume>, <fpage>XVIII</fpage>&#x02013;<lpage>XXX</lpage>. doi: <pub-id pub-id-type="doi">10.1007/BF00422382</pub-id></mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lamme</surname> <given-names>V. A.</given-names></name></person-group> (<year>1995</year>). <article-title>The neurophysiology of figure-ground segregation in primary visual cortex</article-title>. <source>J. Neurosci.</source> <volume>15</volume>, <fpage>1605</fpage>&#x02013;<lpage>1615</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.15-02-01605.1995</pub-id><pub-id pub-id-type="pmid">7869121</pub-id></mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Leaver</surname> <given-names>A. M.</given-names></name> <name><surname>Van Lare</surname> <given-names>J.</given-names></name> <name><surname>Zielinski</surname> <given-names>B.</given-names></name> <name><surname>Halpern</surname> <given-names>A. R.</given-names></name> <name><surname>Rauschecker</surname> <given-names>J. P.</given-names></name></person-group> (<year>2009</year>). <article-title>Brain activation during anticipation of sound sequences</article-title>. <source>J. Neurosci.</source> <volume>29</volume>, <fpage>2477</fpage>&#x02013;<lpage>2485</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.4921-08.2009</pub-id><pub-id pub-id-type="pmid">19244522</pub-id></mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>W.</given-names></name> <name><surname>Zhang</surname> <given-names>C.</given-names></name> <name><surname>Wang</surname> <given-names>X.</given-names></name> <name><surname>Xu</surname> <given-names>J.</given-names></name> <name><surname>Chang</surname> <given-names>Y.</given-names></name> <name><surname>Ristaniemi</surname> <given-names>T.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Functional connectivity of major depression disorder using ongoing EEG during music perception</article-title>. <source>Clin. Neurophysiol.</source> <volume>131</volume>, <fpage>2413</fpage>&#x02013;<lpage>2422</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.clinph.2020.06.031</pub-id><pub-id pub-id-type="pmid">32828045</pub-id></mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Maess</surname> <given-names>B.</given-names></name> <name><surname>Koelsch</surname> <given-names>S.</given-names></name> <name><surname>Gunter</surname> <given-names>T. C.</given-names></name> <name><surname>Friederici</surname> <given-names>A. D.</given-names></name></person-group> (<year>2001</year>). <article-title>Musical syntax is processed in Broca&#x00027;s area: an MEG study</article-title>. <source>Nat. Neurosci.</source> <volume>4</volume>, <fpage>540</fpage>&#x02013;<lpage>545</lpage>. doi: <pub-id pub-id-type="doi">10.1038/87502</pub-id><pub-id pub-id-type="pmid">11319564</pub-id></mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Naatanen</surname> <given-names>R.</given-names></name> <name><surname>Paavilainen</surname> <given-names>P.</given-names></name> <name><surname>Rinne</surname> <given-names>T.</given-names></name> <name><surname>Alho</surname> <given-names>K.</given-names></name></person-group> (<year>2007</year>). <article-title>The mismatch negativity (MMN) in basic research of central auditory processing: a review</article-title>. <source>Clin. Neurophysiol.</source> <volume>118</volume>, <fpage>2544</fpage>&#x02013;<lpage>2590</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.clinph.2007.04.026</pub-id><pub-id pub-id-type="pmid">17931964</pub-id></mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nan</surname> <given-names>Y.</given-names></name> <name><surname>Friederici</surname> <given-names>A. D.</given-names></name></person-group> (<year>2013</year>). <article-title>Differential roles of right temporal cortex and Broca&#x00027;s area in pitch processing: evidence from music and Mandarin</article-title>. <source>Hum. Brain Mapp.</source> <volume>34</volume>, <fpage>2045</fpage>&#x02013;<lpage>2054</lpage>. doi: <pub-id pub-id-type="doi">10.1002/hbm.22046</pub-id><pub-id pub-id-type="pmid">22431306</pub-id></mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Narmour</surname> <given-names>E.</given-names></name></person-group> (<year>2000</year>). <article-title>Music expectation by cognitive rule-mapping</article-title>. <source>Music Percept.</source> <volume>17</volume>, <fpage>329</fpage>&#x02013;<lpage>398</lpage>. doi: <pub-id pub-id-type="doi">10.2307/40285821</pub-id></mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nelson</surname> <given-names>R. A.</given-names></name> <name><surname>Palmer</surname> <given-names>S. E.</given-names></name></person-group> (<year>2007</year>). <article-title>Familiar shapes attract attention in figure-ground displays</article-title>. <source>Percept. Psychophys.</source> <volume>69</volume>, <fpage>382</fpage>&#x02013;<lpage>392</lpage>. doi: <pub-id pub-id-type="doi">10.3758/BF03193759</pub-id><pub-id pub-id-type="pmid">17672426</pub-id></mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>O&#x00027;Sullivan</surname> <given-names>J. A.</given-names></name> <name><surname>Shamma</surname> <given-names>S. A.</given-names></name> <name><surname>Lalor</surname> <given-names>E. C.</given-names></name></person-group> (<year>2015</year>). <article-title>Evidence for neural computations of temporal coherence in an auditory scene and their enhancement during active listening</article-title>. <source>J. Neurosci.</source> <volume>35</volume>, <fpage>7256</fpage>&#x02013;<lpage>7263</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.4973-14.2015</pub-id><pub-id pub-id-type="pmid">25948273</pub-id></mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Palmer</surname> <given-names>S. E.</given-names></name></person-group> (<year>1999</year>). <source>Vision Science: Photons to Phenomenology</source>. <publisher-loc>Cambridge, MA</publisher-loc>: <publisher-name>MIT Press</publisher-name>.</mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Papoutsi</surname> <given-names>M.</given-names></name> <name><surname>Stamatakis</surname> <given-names>E. A.</given-names></name> <name><surname>Griffiths</surname> <given-names>J.</given-names></name> <name><surname>Marslen-Wilson</surname> <given-names>W. D.</given-names></name> <name><surname>Tyler</surname> <given-names>L. K.</given-names></name></person-group> (<year>2011</year>). <article-title>Is left fronto-temporal connectivity essential for syntax? Effective connectivity, tractography and performance in left-hemisphere damaged patients</article-title>. <source>Neuroimage</source> <volume>58</volume>, <fpage>656</fpage>&#x02013;<lpage>664</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2011.06.036</pub-id><pub-id pub-id-type="pmid">21722742</pub-id></mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Perani</surname> <given-names>D.</given-names></name> <name><surname>Saccuman</surname> <given-names>M. C.</given-names></name> <name><surname>Scifo</surname> <given-names>P.</given-names></name> <name><surname>Spada</surname> <given-names>D.</given-names></name> <name><surname>Andreolli</surname> <given-names>G.</given-names></name> <name><surname>Rovelli</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2010</year>). <article-title>Functional specializations for music processing in the human newborn brain</article-title>. <source>Proc. Natl. Acad. Sci. U.S.A.</source> <volume>107</volume>, <fpage>4758</fpage>&#x02013;<lpage>4763</lpage>. doi: <pub-id pub-id-type="doi">10.1073/pnas.0909074107</pub-id><pub-id pub-id-type="pmid">20176953</pub-id></mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Peterson</surname> <given-names>M. A.</given-names></name> <name><surname>Harvey</surname> <given-names>E. M.</given-names></name> <name><surname>Weidenbacher</surname> <given-names>H. J.</given-names></name></person-group> (<year>1991</year>). <article-title>Shape recognition contributions to figure-ground reversal: which route counts?</article-title> <source>J. Exp. Psychol. Hum. Percept. Perform.</source> <volume>17</volume>:<fpage>1075</fpage>. doi: <pub-id pub-id-type="doi">10.1037//0096-1523.17.4.1075</pub-id><pub-id pub-id-type="pmid">1837298</pub-id></mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Plailly</surname> <given-names>J.</given-names></name> <name><surname>Tillmann</surname> <given-names>B.</given-names></name> <name><surname>Royet</surname> <given-names>J. P.</given-names></name></person-group> (<year>2007</year>). <article-title>The feeling of familiarity of music and odors: the same neural signature?</article-title> <source>Cereb. Cortex</source> <volume>17</volume>, <fpage>2650</fpage>&#x02013;<lpage>2658</lpage>. doi: <pub-id pub-id-type="doi">10.1093/cercor/bhl173</pub-id><pub-id pub-id-type="pmid">17289777</pub-id></mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Platel</surname> <given-names>H.</given-names></name> <name><surname>Baron</surname> <given-names>J. C.</given-names></name> <name><surname>Desgranges</surname> <given-names>B.</given-names></name> <name><surname>Bernard</surname> <given-names>F.</given-names></name> <name><surname>Eustache</surname> <given-names>F.</given-names></name></person-group> (<year>2003</year>). <article-title>Semantic and episodic memory of music are subserved by distinct neural networks</article-title>. <source>Neuroimage</source> <volume>20</volume>, <fpage>244</fpage>&#x02013;<lpage>256</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S1053-8119(03)00287-8</pub-id><pub-id pub-id-type="pmid">14527585</pub-id></mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Poort</surname> <given-names>J.</given-names></name> <name><surname>Raudies</surname> <given-names>F.</given-names></name> <name><surname>Wannig</surname> <given-names>A.</given-names></name> <name><surname>Lamme</surname> <given-names>V. A.</given-names></name> <name><surname>Neumann</surname> <given-names>H.</given-names></name> <name><surname>Roelfsema</surname> <given-names>P. R.</given-names></name></person-group> (<year>2012</year>). <article-title>The role of attention in figure-ground segregation in areas V1 and V4 of the visual cortex</article-title>. <source>Neuron</source> <volume>75</volume>, <fpage>143</fpage>&#x02013;<lpage>156</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuron.2012.04.032</pub-id><pub-id pub-id-type="pmid">22794268</pub-id></mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Puschmann</surname> <given-names>S.</given-names></name> <name><surname>Baillet</surname> <given-names>S.</given-names></name> <name><surname>Zatorre</surname> <given-names>R. J.</given-names></name></person-group> (<year>2019</year>). <article-title>Musicians at the Cocktail party: neural substrates of musical training during selective listening in multispeaker situations</article-title>. <source>Cereb. Cortex</source> <volume>29</volume>, <fpage>3253</fpage>&#x02013;<lpage>3265</lpage>. doi: <pub-id pub-id-type="doi">10.1093/cercor/bhy193</pub-id><pub-id pub-id-type="pmid">30137239</pub-id></mixed-citation>
</ref>
<ref id="B41">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Putkinen</surname> <given-names>V.</given-names></name> <name><surname>Nazari-Farsani</surname> <given-names>S.</given-names></name> <name><surname>Sepp&#x000E4;l&#x000E4;</surname> <given-names>K.</given-names></name> <name><surname>Karjalainen</surname> <given-names>T.</given-names></name> <name><surname>Sun</surname> <given-names>L.</given-names></name> <name><surname>Karlsson</surname> <given-names>H. K.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Decoding music-evoked emotions in the auditory and motor cortex</article-title>. <source>Cereb. Cortex</source> <volume>31</volume>, <fpage>2549</fpage>&#x02013;<lpage>2560</lpage>. doi: <pub-id pub-id-type="doi">10.1093/cercor/bhaa373</pub-id><pub-id pub-id-type="pmid">33367590</pub-id></mixed-citation>
</ref>
<ref id="B42">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ragert</surname> <given-names>M.</given-names></name> <name><surname>Fairhurst</surname> <given-names>M. T.</given-names></name> <name><surname>Keller</surname> <given-names>P. E.</given-names></name></person-group> (<year>2014</year>). <article-title>Segregation and integration of auditory streams when listening to multi-part music</article-title>. <source>PLoS ONE</source> <volume>9</volume>:<fpage>e84085</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0084085</pub-id><pub-id pub-id-type="pmid">24475030</pub-id></mixed-citation>
</ref>
<ref id="B43">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Roswandowitz</surname> <given-names>C.</given-names></name> <name><surname>Swanborough</surname> <given-names>H.</given-names></name> <name><surname>Fruhholz</surname> <given-names>S.</given-names></name></person-group> (<year>2021</year>). <article-title>Categorizing human vocal signals depends on an integrated auditory-frontal cortical network</article-title>. <source>Hum. Brain Mapp.</source> <volume>42</volume>, <fpage>1503</fpage>&#x02013;<lpage>1517</lpage>. doi: <pub-id pub-id-type="doi">10.1002/hbm.25309</pub-id><pub-id pub-id-type="pmid">33615612</pub-id></mixed-citation>
</ref>
<ref id="B44">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Saarim&#x000E4;ki</surname> <given-names>H.</given-names></name></person-group> (<year>2021</year>). <article-title>Naturalistic stimuli in affective neuroimaging: a review</article-title>. <source>Front. Hum. Neurosci.</source> <volume>15</volume>:<fpage>675068</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnhum.2021.675068</pub-id><pub-id pub-id-type="pmid">34220474</pub-id></mixed-citation>
</ref>
<ref id="B45">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sabri</surname> <given-names>M.</given-names></name> <name><surname>Liebenthal</surname> <given-names>E.</given-names></name> <name><surname>Waldron</surname> <given-names>E. J.</given-names></name> <name><surname>Medler</surname> <given-names>D. A.</given-names></name> <name><surname>Binder</surname> <given-names>J. R.</given-names></name></person-group> (<year>2006</year>). <article-title>Attentional modulation in the detection of irrelevant deviance: a simultaneous ERP/fMRI study</article-title>. <source>J. Cogn. Neurosci.</source> <volume>18</volume>, <fpage>689</fpage>&#x02013;<lpage>700</lpage>. doi: <pub-id pub-id-type="doi">10.1162/jocn.2006.18.5.689</pub-id><pub-id pub-id-type="pmid">16768370</pub-id></mixed-citation>
</ref>
<ref id="B46">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sammler</surname> <given-names>D.</given-names></name> <name><surname>Koelsch</surname> <given-names>S.</given-names></name> <name><surname>Friederici</surname> <given-names>A. D.</given-names></name></person-group> (<year>2011</year>). <article-title>Are left fronto-temporal brain areas a prerequisite for normal music-syntactic processing?</article-title> <source>Cortex</source> <volume>47</volume>, <fpage>659</fpage>&#x02013;<lpage>673</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cortex.2010.04.007</pub-id><pub-id pub-id-type="pmid">20570253</pub-id></mixed-citation>
</ref>
<ref id="B47">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schneider</surname> <given-names>P.</given-names></name> <name><surname>Sluming</surname> <given-names>V.</given-names></name> <name><surname>Roberts</surname> <given-names>N.</given-names></name> <name><surname>Scherg</surname> <given-names>M.</given-names></name> <name><surname>Goebel</surname> <given-names>R.</given-names></name> <name><surname>Specht</surname> <given-names>H. J.</given-names></name> <etal/></person-group>. (<year>2005</year>). <article-title>Structural and functional asymmetry of lateral Heschl&#x00027;s gyrus reflects pitch perception preference</article-title>. <source>Nat. Neurosci.</source> <volume>8</volume>, <fpage>1241</fpage>&#x02013;<lpage>1247</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nn1530</pub-id><pub-id pub-id-type="pmid">16116442</pub-id></mixed-citation>
</ref>
<ref id="B48">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Singer</surname> <given-names>N.</given-names></name> <name><surname>Jacoby</surname> <given-names>N.</given-names></name> <name><surname>Lin</surname> <given-names>T.</given-names></name> <name><surname>Raz</surname> <given-names>G.</given-names></name> <name><surname>Shpigelman</surname> <given-names>L.</given-names></name> <name><surname>Gilam</surname> <given-names>G.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Common modulation of limbic network activation underlies musical emotions as they unfold</article-title>. <source>Neuroimage</source> <volume>141</volume>, <fpage>517</fpage>&#x02013;<lpage>529</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2016.07.002</pub-id><pub-id pub-id-type="pmid">27389788</pub-id></mixed-citation>
</ref>
<ref id="B49">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sloboda</surname> <given-names>J.</given-names></name> <name><surname>Edworthy</surname> <given-names>J.</given-names></name></person-group> (<year>1981</year>). <article-title>Attending to two melodies at once: the effect of key relatedness</article-title>. <source>Psychol. Music</source> <volume>9</volume>, <fpage>39</fpage>&#x02013;<lpage>43</lpage>. doi: <pub-id pub-id-type="doi">10.1177/03057356810090010701</pub-id></mixed-citation>
</ref>
<ref id="B50">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Snyder</surname> <given-names>J. S.</given-names></name> <name><surname>Alain</surname> <given-names>C.</given-names></name> <name><surname>Picton</surname> <given-names>T. W.</given-names></name></person-group> (<year>2006</year>). <article-title>Effects of attention on neuroelectric correlates of auditory stream segregation</article-title>. <source>J. Cogn. Neurosci.</source> <volume>18</volume>, <fpage>1</fpage>&#x02013;<lpage>13</lpage>. doi: <pub-id pub-id-type="doi">10.1162/089892906775250021</pub-id><pub-id pub-id-type="pmid">16417678</pub-id></mixed-citation>
</ref>
<ref id="B51">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Spada</surname> <given-names>D.</given-names></name> <name><surname>Verga</surname> <given-names>L.</given-names></name> <name><surname>Iadanza</surname> <given-names>A.</given-names></name> <name><surname>Tettamanti</surname> <given-names>M.</given-names></name> <name><surname>Perani</surname> <given-names>D.</given-names></name></person-group> (<year>2014</year>). <article-title>The auditory scene: an fMRI study on melody and accompaniment in professional pianists</article-title>. <source>Neuroimage</source> <volume>102</volume>(<issue>Pt 2</issue>), <fpage>764</fpage>&#x02013;<lpage>775</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2014.08.036</pub-id><pub-id pub-id-type="pmid">25175543</pub-id></mixed-citation>
</ref>
<ref id="B52">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Strait</surname> <given-names>D. L.</given-names></name> <name><surname>Slater</surname> <given-names>J.</given-names></name> <name><surname>O&#x00027;Connell</surname> <given-names>S.</given-names></name> <name><surname>Kraus</surname> <given-names>N.</given-names></name></person-group> (<year>2015</year>). <article-title>Music training relates to the development of neural mechanisms of selective auditory attention</article-title>. <source>Dev. Cogn. Neurosci.</source> <volume>12</volume>, <fpage>94</fpage>&#x02013;<lpage>104</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.dcn.2015.01.001</pub-id><pub-id pub-id-type="pmid">25660985</pub-id></mixed-citation>
</ref>
<ref id="B53">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Str&#x000FC;ber</surname> <given-names>D.</given-names></name> <name><surname>Stadler</surname> <given-names>M.</given-names></name></person-group> (<year>1999</year>). <article-title>Differences in top&#x02014;down influences on the reversal rate of different categories of reversible figures</article-title>. <source>Perception</source> <volume>28</volume>, <fpage>1185</fpage>&#x02013;<lpage>1196</lpage>. doi: <pub-id pub-id-type="doi">10.1068/p2973</pub-id><pub-id pub-id-type="pmid">10694967</pub-id></mixed-citation>
</ref>
<ref id="B54">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sturm</surname> <given-names>I.</given-names></name> <name><surname>Dahne</surname> <given-names>S.</given-names></name> <name><surname>Blankertz</surname> <given-names>B.</given-names></name> <name><surname>Curio</surname> <given-names>G.</given-names></name></person-group> (<year>2015</year>). <article-title>Multi-variate EEG analysis as a novel tool to examine brain responses to naturalistic music stimuli</article-title>. <source>PLoS ONE</source> <volume>10</volume>:<fpage>e0141281</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0141281</pub-id><pub-id pub-id-type="pmid">26510120</pub-id></mixed-citation>
</ref>
<ref id="B55">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Super</surname> <given-names>H.</given-names></name> <name><surname>van der Togt</surname> <given-names>C.</given-names></name> <name><surname>Spekreijse</surname> <given-names>H.</given-names></name> <name><surname>Lamme</surname> <given-names>V. A.</given-names></name></person-group> (<year>2003</year>). <article-title>Internal state of monkey primary visual cortex (V1) predicts figure&#x02013;ground perception</article-title>. <source>J. Neurosci.</source> <volume>23</volume>, <fpage>3407</fpage>&#x02013;<lpage>3414</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.23-08-03407.2003</pub-id><pub-id pub-id-type="pmid">12716948</pub-id></mixed-citation>
</ref>
<ref id="B56">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Taher</surname> <given-names>C.</given-names></name> <name><surname>Rusch</surname> <given-names>R.</given-names></name> <name><surname>McAdams</surname> <given-names>S.</given-names></name></person-group> (<year>2016</year>). <article-title>Effects of repetition on attention in two-part counterpoint</article-title>. <source>Music Percept. Interdiscipl. J.</source> <volume>33</volume>, <fpage>306</fpage>&#x02013;<lpage>318</lpage>. doi: <pub-id pub-id-type="doi">10.1525/mp.2016.33.3.306</pub-id></mixed-citation>
</ref>
<ref id="B57">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Taulu</surname> <given-names>S.</given-names></name> <name><surname>Hari</surname> <given-names>R.</given-names></name></person-group> (<year>2009</year>). <article-title>Removal of magnetoencephalographic artifacts with temporal signal-space separation: demonstration with single-trial auditory-evoked responses</article-title>. <source>Hum. Brain Mapp.</source> <volume>30</volume>, <fpage>1524</fpage>&#x02013;<lpage>1534</lpage>. doi: <pub-id pub-id-type="doi">10.1002/hbm.20627</pub-id><pub-id pub-id-type="pmid">18661502</pub-id></mixed-citation>
</ref>
<ref id="B58">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Teki</surname> <given-names>S.</given-names></name> <name><surname>Chait</surname> <given-names>M.</given-names></name> <name><surname>Kumar</surname> <given-names>S.</given-names></name> <name><surname>von Kriegstein</surname> <given-names>K.</given-names></name> <name><surname>Griffiths</surname> <given-names>T. D.</given-names></name></person-group> (<year>2011</year>). <article-title>Brain bases for auditory stimulus-driven figure-ground segregation</article-title>. <source>J. Neurosci.</source> <volume>31</volume>, <fpage>164</fpage>&#x02013;<lpage>171</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.3788-10.2011</pub-id><pub-id pub-id-type="pmid">21209201</pub-id></mixed-citation>
</ref>
<ref id="B59">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tervaniemi</surname> <given-names>M.</given-names></name></person-group> (<year>2023</year>). <article-title>The neuroscience of music&#x02013;towards ecological validity</article-title>. <source>Trends Neurosci.</source> <volume>46</volume>, <fpage>355</fpage>&#x02013;<lpage>364</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.tins.2023.03.001</pub-id></mixed-citation>
</ref>
<ref id="B60">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tesche</surname> <given-names>C. D.</given-names></name> <name><surname>Uusitalo</surname> <given-names>M. A.</given-names></name> <name><surname>Ilmoniemi</surname> <given-names>R. J.</given-names></name> <name><surname>Huotilainen</surname> <given-names>M.</given-names></name> <name><surname>Kajola</surname> <given-names>M.</given-names></name> <name><surname>Salonen</surname> <given-names>O.</given-names></name></person-group> (<year>1995</year>). <article-title>Signal-space projections of MEG data characterize both distributed and well-localized neuronal sources</article-title>. <source>Electroencephalogr. Clin. Neurophysiol.</source> <volume>95</volume>, <fpage>189</fpage>&#x02013;<lpage>200</lpage>. doi: <pub-id pub-id-type="doi">10.1016/0013-4694(95)00064-6</pub-id><pub-id pub-id-type="pmid">7555909</pub-id></mixed-citation>
</ref>
<ref id="B61">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tillmann</surname> <given-names>B.</given-names></name> <name><surname>Bharucha</surname> <given-names>J. J.</given-names></name> <name><surname>Bigand</surname> <given-names>E.</given-names></name></person-group> (<year>2000</year>). <article-title>Implicit learning of tonality: a self-organizing approach</article-title>. <source>Psychol. Rev.</source> <volume>107</volume>:<fpage>885</fpage>. doi: <pub-id pub-id-type="doi">10.1037/0033-295X.107.4.885</pub-id><pub-id pub-id-type="pmid">11089410</pub-id></mixed-citation>
</ref>
<ref id="B62">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tillmann</surname> <given-names>B.</given-names></name> <name><surname>Janata</surname> <given-names>P.</given-names></name> <name><surname>Bharucha</surname> <given-names>J. J.</given-names></name></person-group> (<year>2003</year>). <article-title>Activation of the inferior frontal cortex in musical priming</article-title>. <source>Brain Res. Cogn. Brain Res.</source> <volume>16</volume>, <fpage>145</fpage>&#x02013;<lpage>161</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0926-6410(02)00245-8</pub-id><pub-id pub-id-type="pmid">14681143</pub-id></mixed-citation>
</ref>
<ref id="B63">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>T&#x000F3;th</surname> <given-names>B.</given-names></name> <name><surname>Kocsis</surname> <given-names>Z.</given-names></name> <name><surname>H&#x000E1;den</surname> <given-names>G. P.</given-names></name> <name><surname>Szerafin</surname> <given-names>A.</given-names></name> <name><surname>Shinn-Cunningham</surname> <given-names>B. G.</given-names></name> <name><surname>Winkler</surname> <given-names>I.</given-names></name></person-group> (<year>2016</year>). <article-title>EEG signatures accompanying auditory figure-ground segregation</article-title>. <source>Neuroimage</source> <volume>141</volume>, <fpage>108</fpage>&#x02013;<lpage>119</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2016.07.028</pub-id><pub-id pub-id-type="pmid">27421185</pub-id></mixed-citation>
</ref>
<ref id="B64">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Trainor</surname> <given-names>L. J.</given-names></name> <name><surname>Marie</surname> <given-names>C.</given-names></name> <name><surname>Bruce</surname> <given-names>I. C.</given-names></name> <name><surname>Bidelman</surname> <given-names>G. M.</given-names></name></person-group> (<year>2014</year>). <article-title>Explaining the high voice superiority effect in polyphonic music: evidence from cortical evoked potentials and peripheral auditory models</article-title>. <source>Hear. Res.</source> <volume>308</volume>, <fpage>60</fpage>&#x02013;<lpage>70</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.heares.2013.07.014</pub-id><pub-id pub-id-type="pmid">23916754</pub-id></mixed-citation>
</ref>
<ref id="B65">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Trehub</surname> <given-names>S. E.</given-names></name> <name><surname>Morrongiello</surname> <given-names>B. A.</given-names></name> <name><surname>Thorpe</surname> <given-names>L. A.</given-names></name></person-group> (<year>1985</year>). <article-title>Children&#x00027;s perception of familiar melodies: the role of intervals, contour, and key</article-title>. <source>Psychomusicol. J. Res. Music Cogn.</source> <volume>5</volume>:<fpage>39</fpage>. doi: <pub-id pub-id-type="doi">10.1037/h0094201</pub-id></mixed-citation>
</ref>
<ref id="B66">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Uhlig</surname> <given-names>M.</given-names></name> <name><surname>Fairhurst</surname> <given-names>M. T.</given-names></name> <name><surname>Keller</surname> <given-names>P. E.</given-names></name></person-group> (<year>2013</year>). <article-title>The importance of integration and top-down salience when listening to complex multi-part musical stimuli</article-title>. <source>Neuroimage</source> <volume>77</volume>, <fpage>52</fpage>&#x02013;<lpage>61</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2013.03.051</pub-id><pub-id pub-id-type="pmid">23558103</pub-id></mixed-citation>
</ref>
<ref id="B67">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Upitis</surname> <given-names>R.</given-names></name></person-group> (<year>1990</year>). <article-title>Children&#x00027;s invented notations of familiar and unfamiliar melodies</article-title>. <source>Psychomusicol. J. Res. Music Cogn.</source> <volume>9</volume>:<fpage>89</fpage>. doi: <pub-id pub-id-type="doi">10.1037/h0094156</pub-id></mixed-citation>
</ref>
<ref id="B68">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>van der Burght</surname> <given-names>C. L.</given-names></name> <name><surname>Goucha</surname> <given-names>T.</given-names></name> <name><surname>Friederici</surname> <given-names>A. D.</given-names></name> <name><surname>Kreitewolf</surname> <given-names>J.</given-names></name> <name><surname>Hartwigsen</surname> <given-names>G.</given-names></name></person-group> (<year>2019</year>). <article-title>Intonation guides sentence processing in the left inferior frontal gyrus</article-title>. <source>Cortex</source> <volume>117</volume>, <fpage>122</fpage>&#x02013;<lpage>134</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cortex.2019.02.011</pub-id><pub-id pub-id-type="pmid">30974320</pub-id></mixed-citation>
</ref>
<ref id="B69">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Von der Heydt</surname> <given-names>R.</given-names></name></person-group> (<year>2015</year>). <article-title>Figure&#x02013;ground organization and the emergence of proto-objects in the visual cortex</article-title>. <source>Front. Psychol.</source> <volume>6</volume>:<fpage>1695</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2015.01695</pub-id><pub-id pub-id-type="pmid">26579062</pub-id></mixed-citation>
</ref>
<ref id="B70">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wagemans</surname> <given-names>J.</given-names></name> <name><surname>Elder</surname> <given-names>J. H.</given-names></name> <name><surname>Kubovy</surname> <given-names>M.</given-names></name> <name><surname>Palmer</surname> <given-names>S. E.</given-names></name> <name><surname>Peterson</surname> <given-names>M. A.</given-names></name> <name><surname>Singh</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>A century of Gestalt psychology in visual perception: I. Perceptual grouping and figure-ground organization</article-title>. <source>Psychol. Bull.</source> <volume>138</volume>, <fpage>1172</fpage>&#x02013;<lpage>1217</lpage>. doi: <pub-id pub-id-type="doi">10.1037/a0029333</pub-id><pub-id pub-id-type="pmid">22845751</pub-id></mixed-citation>
</ref>
<ref id="B71">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Watanabe</surname> <given-names>T.</given-names></name> <name><surname>Yagishita</surname> <given-names>S.</given-names></name> <name><surname>Kikyo</surname> <given-names>H.</given-names></name></person-group> (<year>2008</year>). <article-title>Memory of music: roles of right hippocampus and left inferior frontal gyrus</article-title>. <source>Neuroimage</source> <volume>39</volume>, <fpage>483</fpage>&#x02013;<lpage>491</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2007.08.024</pub-id><pub-id pub-id-type="pmid">17905600</pub-id></mixed-citation>
</ref>
<ref id="B72">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Weilnhammer</surname> <given-names>V.</given-names></name> <name><surname>Fritsch</surname> <given-names>M.</given-names></name> <name><surname>Chikermane</surname> <given-names>M.</given-names></name> <name><surname>Eckert</surname> <given-names>A. L.</given-names></name> <name><surname>Kanthak</surname> <given-names>K.</given-names></name> <name><surname>Stuke</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>An active role of inferior frontal cortex in conscious experience</article-title>. <source>Curr. Biol.</source> <volume>31</volume>, <fpage>2868</fpage>&#x02013;<lpage>2880</lpage>.e2868. doi: <pub-id pub-id-type="doi">10.1016/j.cub.2021.04.043</pub-id><pub-id pub-id-type="pmid">33989530</pub-id></mixed-citation>
</ref>
<ref id="B73">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Weineck</surname> <given-names>K.</given-names></name> <name><surname>Wen</surname> <given-names>O. X.</given-names></name> <name><surname>Henry</surname> <given-names>M. J.</given-names></name></person-group> (<year>2022</year>). <article-title>Neural synchronization is strongest to the spectral flux of slow music and depends on familiarity and beat salience</article-title>. <source>Elife</source> <volume>11</volume>:<fpage>e75515</fpage>. doi: <pub-id pub-id-type="doi">10.7554/eLife.75515</pub-id><pub-id pub-id-type="pmid">36094165</pub-id></mixed-citation>
</ref>
<ref id="B74">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>N. R.</given-names></name> <name><surname>Von der Heydt</surname> <given-names>R.</given-names></name></person-group> (<year>2010</year>). <article-title>Analysis of the context integration mechanisms underlying figure&#x02013;ground organization in the visual cortex</article-title>. <source>J. Neurosci.</source> <volume>30</volume>, <fpage>6482</fpage>&#x02013;<lpage>6496</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.5168-09.2010</pub-id><pub-id pub-id-type="pmid">20463212</pub-id></mixed-citation>
</ref>
<ref id="B75">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhu</surname> <given-names>Y.</given-names></name> <name><surname>Xu</surname> <given-names>M.</given-names></name> <name><surname>Lu</surname> <given-names>J.</given-names></name> <name><surname>Hu</surname> <given-names>J.</given-names></name> <name><surname>Kwok</surname> <given-names>V. P. Y.</given-names></name> <name><surname>Zhou</surname> <given-names>Y.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Distinct spatiotemporal patterns of syntactic and semantic processing in human inferior frontal gyrus</article-title>. <source>Nat. Hum. Behav.</source> <volume>6</volume>, <fpage>1104</fpage>&#x02013;<lpage>1111</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41562-022-01334-6</pub-id><pub-id pub-id-type="pmid">35618778</pub-id></mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/427623/overview">Adam Linson</ext-link>, The Open University, United Kingdom</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2266537/overview">Zhiyuan Wang</ext-link>, Roku, Inc., United States</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/978407/overview">Tongning Wu</ext-link>, China Academy of Information and Communications Technology, China</p>
</fn>
</fn-group>
</back>
</article>