<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Psychol.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Psychology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Psychol.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1664-1078</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpsyg.2026.1659797</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Music speaks louder than lyrics: a conceptual priming experiment</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Karbanova</surname> <given-names>Alice</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<uri xlink:href="https://loop.frontiersin.org/people/3019980"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Cui</surname> <given-names>Anja-Xiaoxing</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<uri xlink:href="https://loop.frontiersin.org/people/857713"/>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Department of Romance Languages, Faculty of Arts, Masaryk University</institution>, <city>Brno</city>, <country country="CZ">Czechia</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Musicology, Faculty of Philology and Cultural Studies, University of Vienna</institution>, <city>Vienna</city>, <country country="AT">Austria</country></aff>
<aff id="aff3"><label>3</label><institution>Vienna Cognitive Science Hub, University of Vienna</institution>, <city>Vienna</city>, <country country="AT">Austria</country></aff>
<author-notes>
<corresp id="c001"><label>&#x0002A;</label>Correspondence: Anja-Xiaoxing Cui, <email xlink:href="mailto:anja-xiaoxing.cui@univie.ac.at">anja-xiaoxing.cui@univie.ac.at</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-03-09">
<day>09</day>
<month>03</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>17</volume>
<elocation-id>1659797</elocation-id>
<history>
<date date-type="received">
<day>04</day>
<month>07</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>11</day>
<month>02</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>17</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2026 Karbanova and Cui.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Karbanova and Cui</copyright-holder>
<license>
<ali:license_ref start_date="2026-03-09">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Although the processing of language and music are thought to be related, the semantic interplay of these domains in song remains relatively unexplored. This study investigates how music and lyrics contribute to conceptual meaning-making in song interpretation using a conceptual priming experiment.</p>
</sec>
<sec>
<title>Methods</title>
<p>Fifty participants completed a lexical decision task in which target words were semantically related either to the music or to the lyrics of an ecologically valid song prime. Reaction times were used to infer semantic alignment.</p>
</sec>
<sec>
<title>Results and Discussion</title>
<p>The results showed significantly faster responses to target words associated with the music than to those associated with the lyrics of the prime. This effect remained significant even after controlling for various properties of the primes and targets, which had been assessed by an additional 234 participants in complementary studies prior to the priming experiment. We also found a significant interaction between target type (music- vs. lyrics-related) and the Euclidean distance of valence and arousal between the prime and target: affective distance predicted reaction times only for music-derived targets. Ratings from the complementary studies indicated that music evoked more positive and arousing responses than lyrics, while lyrics appeared to dampen the affective intensity of musical excerpts. Our findings challenge the assumption of tight integration between melody and lyrics in song processing. They suggest that music and language contribute unequally to conceptual interpretation in song, with music playing a more dominant role. These results offer new insights into the construction of multi-modal meanings and the cognitive mechanisms underlying song comprehension.</p>
</sec></abstract>
<kwd-group>
<kwd>cognitive linguistics</kwd>
<kwd>conceptual priming</kwd>
<kwd>musical semantics</kwd>
<kwd>music-language interactions</kwd>
<kwd>song interpretation</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This study was funded by the Specific Research Program of the Masaryk University, Brno, Czechia. Open access funding provided by the University of Vienna.</funding-statement>
</funding-group>
<counts>
<fig-count count="5"/>
<table-count count="2"/>
<equation-count count="0"/>
<ref-count count="70"/>
<page-count count="13"/>
<word-count count="10161"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Performance Science</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Although language and music share many structural features and have been extensively studied with respect to shared cognitive processes, research on interactions between their semantic aspects remains relatively scarce. This lack of research is particularly surprising, given the unique fusion of linguistic and musical elements carrying meaning in songs (<xref ref-type="bibr" rid="B51">Peretz et al., 2004</xref>), which constitute a musical behavior found in all known human cultures (<xref ref-type="bibr" rid="B43">Mehr et al., 2019</xref>). Studies of song perception thus provide a valuable framework for investigating the interrelationship between language, specifically lyrics, and music perception.</p>
<p>Recent research on music-language interactions has been dominated by topics other than semantics (<xref ref-type="bibr" rid="B46">Nayak et al., 2022</xref>; <xref ref-type="bibr" rid="B22">Fiveash et al., 2021</xref>; <xref ref-type="bibr" rid="B69">Temperley, 2022</xref>; <xref ref-type="bibr" rid="B31">Jansen et al., 2023</xref>). Existing studies addressing the relationship between music and lyrics specifically have primarily focused on memory and recognition (<xref ref-type="bibr" rid="B17">Crowder et al., 1990</xref>; <xref ref-type="bibr" rid="B59">Samson and Zatorre, 1991</xref>; <xref ref-type="bibr" rid="B61">Serafine et al., 1984</xref>; <xref ref-type="bibr" rid="B51">Peretz et al., 2004</xref>), or the relative influence of lyrics and music on mood and perceived emotion (<xref ref-type="bibr" rid="B65">Stratton and Zalanowski, 1994</xref>; <xref ref-type="bibr" rid="B1">Ali and Peynircio&#x0011F;lu, 2006</xref>; <xref ref-type="bibr" rid="B49">Omigie, 2015</xref>). The latter studies can be read as investigating the interplay of semantic processing of lyrics and music, in so far as the expressed emotion is part of the semantic content of both lyrics and music (<xref ref-type="bibr" rid="B63">Steinbeis and Koelsch, 2011</xref>).</p>
<sec>
<label>1.1</label>
<title>Processing of emotion in song</title>
<p>An early study examining the relative importance of different emotional sources in music compared singing with instrumental versions (<xref ref-type="bibr" rid="B65">Stratton and Zalanowski, 1994</xref>). It found that, in the absence of singing, listeners were unable to identify the intended emotion of the music. Even when the melody was upbeat, the presence of sad lyrics heightened feelings of depression in listeners. The authors concluded that lyrics play a more significant role than vocals or instrumental elements in shaping listeners&#x00027; emotional responses. These findings align with research suggesting that lyrics also dominate song memory (<xref ref-type="bibr" rid="B52">Peynircio&#x0011F;lu et al., 1998</xref>; <xref ref-type="bibr" rid="B61">Serafine et al., 1984</xref>).</p>
<p>However, these findings contrast with those of another study that examined the potential dominance of lyrics in shaping affective responses by comparing lyrics without vocals to instrumental music (<xref ref-type="bibr" rid="B62">Sousou, 1997</xref>). In this study, participants read happy or sad lyrics while listening to either happy or sad instrumental music. The participants&#x00027; mood was influenced by background music rather than lyrical content, suggesting that, in the absence of vocal cues, instrumental music exerted a stronger effect on affective responses than lyrics alone. And in yet another study, when a song in an unknown language was presented in which sad lyrics were accompanied by happy music, happiness ratings were greater than sadness ratings. But when the translation of the sad lyrics was also presented on the screen, neither music nor lyrics dominated and ratings of happiness and sadness were statistically indistinguishable (<xref ref-type="bibr" rid="B45">Mori and Iwanaga, 2013</xref>).</p>
<p>Similar conclusions were drawn in a separate study (<xref ref-type="bibr" rid="B1">Ali and Peynircio&#x0011F;lu, 2006</xref>), which compared the emotional influence of instrumentals with that of vocals and lyrics. Here, the intended emotion of the instrumental track was found to dominate listeners&#x00027; affective responses, overriding the emotional content of added vocals and lyrics. Notably, the authors also observed an interaction between the emotion conveyed and the presence of vocals and lyrics: when these were emotionally congruent with the instrumental, they intensified sadness but diminished feelings of happiness. Several studies have reported a distinct processing of happy and sad music, whereby the dominance of vocals and lyrics increases in sad music (<xref ref-type="bibr" rid="B13">Brattico et al., 2011</xref>; <xref ref-type="bibr" rid="B70">Vidas et al., 2020</xref>). However, this distinction is not found in all cultures (<xref ref-type="bibr" rid="B3">Barradas and Sakka, 2022</xref>).</p>
</sec>
<sec>
<label>1.2</label>
<title>Lyrics and melody processing&#x02014;Integrated or independent?</title>
<p>Research on song perception has also explored whether the concurrently presented auditory streams of lyrics and melody are processed independently or as an integrated whole. Neurological evidence points to both shared and specialized pathways for music and language processing.</p>
<p>The view of integrated processing is buttressed by converging evidence from neuroimaging and behavioral studies. These studies report neural overlap in the processing of speech and music (<xref ref-type="bibr" rid="B56">Rogalsky et al., 2011</xref>; <xref ref-type="bibr" rid="B60">Schulze et al., 2011</xref>), shared neural circuitry (<xref ref-type="bibr" rid="B68">Tallal and Gaab, 2006</xref>), overlapping cortical and subcortical networks (<xref ref-type="bibr" rid="B2">Asaridou and McQueen, 2013</xref>), and even neural populations selectively responsive to sung music located adjacent to regions specialized for speech and instrumental music (<xref ref-type="bibr" rid="B48">Norman-Haignere et al., 2022</xref>). Additional evidence points to overlapping mechanisms for semantic processing in language and music (<xref ref-type="bibr" rid="B15">Calma-Roddin and Drury, 2020</xref>; <xref ref-type="bibr" rid="B44">Miranda and Ullman, 2007</xref>), as well as between speech and song (<xref ref-type="bibr" rid="B57">Rossi et al., 2020</xref>). Consistent with this view, <xref ref-type="bibr" rid="B59">Samson and Zatorre (1991)</xref> found that songs are better remembered when accompanied by lyrics.</p>
<p>Other research however supports the independence hypothesis. <xref ref-type="bibr" rid="B12">Bonnel et al. (2001)</xref> found no interference between semantic and melodic processing in a divided attention task, implying that lyrics and melody may be processed as distinct perceptual Gestalts (<xref ref-type="bibr" rid="B14">Bregman, 1990</xref>). Likewise, <xref ref-type="bibr" rid="B6">Besson et al. (1998)</xref> provided evidence for the independence of linguistic and melodic components in songs. A more nuanced perspective is offered by <xref ref-type="bibr" rid="B58">Sammler et al. (2010)</xref>, who demonstrated interactive processing of lyrics and melodies in the left middle superior temporal sulcus at a prelexical level, but a functional dissociation in more anterior regions. This supports the idea of a posterior&#x02013;anterior gradient, along which integration and separation vary. Still, the degree of integration or separation might be task-dependent (<xref ref-type="bibr" rid="B40">LaCroix et al., 2015</xref>).</p>
<p>On the whole, the existing literature suggests that the contributions of lyrics and music to emotion processing in song are unequal, thereby supporting the independence hypothesis. As outlined earlier, some older studies indicate that lyrics exert a stronger influence on affective responses (<xref ref-type="bibr" rid="B65">Stratton and Zalanowski, 1994</xref>; <xref ref-type="bibr" rid="B52">Peynircio&#x0011F;lu et al., 1998</xref>; <xref ref-type="bibr" rid="B61">Serafine et al., 1984</xref>), while others find that music plays the leading role (<xref ref-type="bibr" rid="B62">Sousou, 1997</xref>; <xref ref-type="bibr" rid="B1">Ali and Peynircio&#x0011F;lu, 2006</xref>). More recent studies suggest that the relative influence of lyrics and music may depend on the specific emotional content being conveyed (<xref ref-type="bibr" rid="B13">Brattico et al., 2011</xref>; <xref ref-type="bibr" rid="B70">Vidas et al., 2020</xref>).</p>
<p>However, these studies face methodological limitations. For instance, research using popular and familiar music (<xref ref-type="bibr" rid="B13">Brattico et al., 2011</xref>; <xref ref-type="bibr" rid="B70">Vidas et al., 2020</xref>) cannot fully control for genre-specific trends toward positive affect in popular music (<xref ref-type="bibr" rid="B29">Interiano et al., 2018</xref>), nor for personal memories associated with familiar songs. Such memories can themselves evoke emotional responses, and when triggered by music, they are often associated with positive affect (<xref ref-type="bibr" rid="B18">Cuddy et al., 2017</xref>; <xref ref-type="bibr" rid="B30">Jakubowski et al., 2020</xref>; <xref ref-type="bibr" rid="B38">Krumhansl and Zupnick, 2013</xref>; <xref ref-type="bibr" rid="B50">Parks and Clancy Dollinger, 2014</xref>). Moreover, in some studies, the instrumental stimuli were drawn from instrumental pieces (<xref ref-type="bibr" rid="B1">Ali and Peynircio&#x0011F;lu, 2006</xref>; <xref ref-type="bibr" rid="B62">Sousou, 1997</xref>), which were never intended to accompany vocals or lyrics. This may have introduced a bias in favor of instrumental dominance in affective responses.</p>
</sec>
<sec>
<label>1.3</label>
<title>Present investigation</title>
<p>Our study seeks to address these limitations through rigorous stimulus validation, while also extending the focus beyond emotional responses to examine broader conceptual processing in song. While studies investigating event-related potentials suggest that instrumental music can evoke concepts beyond emotion, such as &#x0201C;wideness&#x0201D; (<xref ref-type="bibr" rid="B36">Koelsch et al., 2004</xref>) or &#x0201C;battle&#x0201D; (<xref ref-type="bibr" rid="B54">Proverbio et al., 2022</xref>), to the best of our knowledge, no study has examined the processing of semantic content presented simultaneously through both musical and linguistic means.</p>
</sec>
</sec>
<sec id="s2">
<label>2</label>
<title>Methods</title>
<p>Here, we used a conceptual priming paradigm with relatively unfamiliar and ecologically valid song stimuli composed and written by French polymath Boris Vian. Participants listened to short excerpts from songs and then completed a lexical decision task on a target word that was semantically related either to the lyrical or musical component of the excerpt. We hypothesized that participants would perform faster in the lexical decision task when the target word was conceptually related to the modality which dominates song perception, as conceptual priming should facilitate processing of congruent targets and potentially inhibit incongruent ones. In doing so, we aim to contribute to the ongoing debate on whether music and language processing operate as integrated or independent systems by examining how each modality drives semantic processing of song.</p>
<p>Below, we provide details about the participants, stimuli, procedure, and analysis of the lexical decision task. Within the sections regarding the psychological properties of stimuli, we provide details about additional participants and how the properties were assessed. Given that the language of the presented stimuli was French, only native French speakers were invited to participate in the experiment. All participants gave informed consent. All statistical analyses were performed using RStudio (<xref ref-type="bibr" rid="B55">R Core Team, 2023</xref>), and a significance threshold of <italic>p</italic> &#x0003C; 0.05 was used unless stated otherwise. The study procedures were reviewed by the ethics review board of Masaryk University, CZ.</p>
<sec>
<label>2.1</label>
<title>Participants</title>
<p>For the conceptual priming experiment, we recruited 50 participants (31 identified as male; as age was assessed in brackets, we report the median age group of 20&#x02013;40 years here as well as for the participant groups below). Sixteen participants completed the experiment in a laboratory setting, while the remaining participants were recruited through mailing lists. For the online group, a brief pre-test meeting was conducted via Zoom to establish rapport and explain the procedure.</p>
<p>Before the lexical decision task, participants completed a demographic questionnaire and an integrated informed consent form on the Gorilla&#x02122; platform. Participants also answered a number of <italic>ad hoc</italic> questionnaire items, which aimed to assess subjective bias toward different components of the songs (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Table S1</xref>). Participants received compensation of 12 euros (or the local currency equivalent) for their participation.</p>
</sec>
<sec>
<label>2.2</label>
<title>Stimuli&#x02014;Primes</title>
<p>We selected songs by Boris Vian based on the assumption that, even in France and other French-speaking regions, he is predominantly known as a writer rather than as a musician, and even less so as a composer. We restricted our stimulus set to 13 songs for which Boris Vian was both the lyricist and composer, and for which musical scores were available. The arrangements for voice and piano were obtained from &#x000C9;ditions Jacques Canetti. To the best of our knowledge, several of these songs were recorded for the first time by our team, allowing the musicians a high degree of interpretative freedom during the recording process. Participants&#x00027; familiarity with Vian&#x00027;s musical work was low: Only 10% of all participants reported being familiar with Vian&#x00027;s music (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Table S2</xref>). This supports the conclusion that our stimuli were unlikely to evoke personal memories or associations, minimizing potential confounds related to prior exposure.</p>
<p>From the pool of 13 songs, excerpts were selected such that approximately half of the selected excerpts featured passages in which the lyrical and musical semantic content differed. In some cases, different lyrics were set to identical musical accompaniments. These selections were made based on the subjective judgment of AK, and subsequently tested through two preliminary studies (detailed below).</p>
<p>In total, 25 excerpts were selected, each averaging 17 seconds in duration. Each excerpt was recorded in three distinct versions:</p>
<list list-type="order">
<list-item><p>ML (Music &#x0002B; Lyrics): A complete version combining the sung melody (soprano voice) with piano accompaniment.</p></list-item>
<list-item><p>M (Music only): An instrumental version in which the vocal melody was performed by a viola&#x02014;a timbre previously shown to evoke voice-like qualities (<xref ref-type="bibr" rid="B53">Proverbio and Piotti, 2022</xref>)&#x02014;with piano accompaniment.</p></list-item>
<list-item><p>L (Lyrics only): A spoken version of the lyrics delivered by a female voice to match the soprano voice from the ML version.</p></list-item>
</list>
<p>The M and L versions were used to generate target words and were evaluated in preliminary studies regarding a number of variables (detailed below). The ML versions&#x02014;combining both linguistic and musical information&#x02014;served as the prime stimuli in the conceptual priming task.</p>
<sec>
<label>2.2.1</label>
<title>Latent affective properties of recordings</title>
<p>To assess the latent affective dimensions of the musical stimuli, two groups of participants were recruited (54 male, median age group: 20&#x02013;40 years). One group (<italic>N</italic> = 42) evaluated the ML version while the other group (<italic>N</italic> = 55) evaluated the M and L versions of the excerpts. Because version M contained no linguistic content and version L no musical content, it was highly unlikely that participants would be able to associate excerpts from these two versions with each other. As such, we deemed it methodologically acceptable to use the same group to evaluate both M and L stimuli versions. All participants were recruited online.</p>
<p>Each participant rated the excerpts on a 7-point Likert scale according to two affective dimensions: valence (ranging from unpleasant to pleasant) and arousal (ranging from calm to excited). These dimensions were chosen in accordance with established circumplex models of emotional experience, particularly in the context of music perception, where valence and arousal are known to play a central role (<xref ref-type="bibr" rid="B16">C&#x000E9;spedes-Guevara and Eerola, 2018</xref>; <xref ref-type="bibr" rid="B20">Eerola and Vuoskoski, 2011</xref>). The goal of collecting these ratings was to assess the potential influence of emotional content on participants&#x00027; responses in the lexical decision task. Emotional and affective states function as top-down influences that shape perceptual and cognitive processing, guide attention, and facilitate access to emotionally congruent information. In this way, the emotional tone of a stimulus can bias perception and interpretation from the earliest stages of processing.</p>
</sec>
</sec>
<sec>
<label>2.3</label>
<title>Stimuli&#x02014;Targets</title>
<p>To derive appropriate target words for use in the priming task, we recruited two additional groups of participants (<italic>N</italic> = 99, 60 male, median age group: 20&#x02013;40 years). The subjects were asked to provide free associations in response to auditory excerpts from each of the three stimulus versions. Participants were instructed to list up to four words that spontaneously came to mind while listening to each excerpt. They were encouraged to respond quickly and intuitively, in order to capture immediate conceptual associations and minimize the influence of reflective or strategic thinking. The first group (<italic>N</italic> = 47) evaluated excerpts from version ML, while the second group (<italic>N</italic> = 52) listened to excerpts from versions M and L. All participants were recruited online.</p>
<p>Following data cleaning (lower-casing, lemmatization, and normalization), the most frequently mentioned associations were grouped into semantically homogeneous categories based on their conceptual proximity. This categorization was performed by AK using semantic analysis, which examines the meanings of words and their relationships in order to group them into shared semantic fields, and content analysis, which systematically categorizes words based on emergent thematic criteria to interpret patterns of meaning. Despite potential biases, manual annotation remains a standard and accepted practice in studies of this kind (<xref ref-type="bibr" rid="B8">Bolognesi et al., 2017</xref>). From these categorized association fields, the most representative terms were selected as target words for the main experiment (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Table S3</xref>).</p>
<sec>
<label>2.3.1</label>
<title>Cosine similarity</title>
<p>To control for the potential influence of semantic congruency between the musical and lyrical components on reaction times in the conceptual priming task, we computed cosine similarity between the sets of associations generated in response to versions M and L. Conceptual similarity can be quantified in various ways, depending on the underlying theoretical framework. In distributional semantic models, for example, word similarity is inferred from linguistic co-occurrence patterns&#x02014;words that occur in similar contexts tend to share similar meanings (<xref ref-type="bibr" rid="B21">Fernandino et al., 2022</xref>). For instance, the words doctor and nurse are inferred to be semantically similar because they frequently occur in similar linguistic contexts (e.g., hospital, patient, and treatment), despite differing in form. These models use word vectors that represent lexical items as points in a high-dimensional space, allowing semantic similarity to be measured geometrically.</p>
<p>To implement this analysis, the association data were tokenized and transformed into vector representations using pre-trained embeddings from the open-source FastText library (<xref ref-type="bibr" rid="B34">Joulin et al., 2017</xref>), where each word is represented as a 300-dimensional vector based on contextual usage across large text corpora. We performed pairwise cosine similarity comparisons between the association sets generated for the ML and M versions, the ML and L versions, and the M and L versions. The goal was to determine whether the integrated excerpts (ML) semantically aligned more closely with either of their individual components.</p>
<p>Since cosine similarity was calculated using publicly available vectors, we employed repeated-measures ANOVAs. These analyses revealed no significant differences in cosine similarity between any of the version pairings (<italic>F</italic><sub>(2, 48)</sub> &#x0003D; 3.32, Greenhouse-Geisser corrected <italic>p</italic> &#x0003D; 0.066). A paired t-test showed that the semantic proximity between targets generated from version ML and version M was statistically indistinguishable from that between version ML and version L (<italic>t</italic><sub>(22)</sub> &#x0003D; &#x02212;0.82, <italic>p</italic> &#x0003D; 0.42, 95% CI [&#x02212;0.10, 0.043], mean difference &#x0003D; &#x02212;0.028). This suggests that neither the musical nor the lyrical component alone is a better predictor of the conceptual associations evoked by the combined version.</p>
<p>Overall, the corpus showed a low degree of semantic similarity across associations, indicating that while the concepts were related, they were not highly overlapping. In their study, <xref ref-type="bibr" rid="B26">Hiebel et al. (2022)</xref> used a similarity threshold of 0.4, above which concepts are considered semantically close. In our data, only one stimulus in each of the ML&#x02013;M and L&#x02013;M comparison exceeded this threshold. For M&#x02013;L comparisons, only three stimuli reached values above 0.3. Notably, these were also the stimuli we had pre-identified as congruent, validating our initial judgment through quantitative analysis.</p>
</sec>
<sec>
<label>2.3.2</label>
<title>Latent semantic properties of the target words</title>
<p>Words have formal and semantic characteristics that may influence the speed and ease with which they can be recognized and understood (<xref ref-type="bibr" rid="B66">Syssau and Font, 2005</xref>). It is therefore essential to identify and account for such features when interpreting results from semantic tasks. <xref ref-type="bibr" rid="B21">Fernandino et al. (2022)</xref> further demonstrated that conceptual coding theory, which models concepts as grounded in experiential content, outperforms competing models in predicting conceptual behavior. This highlights the importance of incorporating experiential dimensions such as sensory experience and concreteness when investigating how words influence cognitive processes.</p>
<p>To control for the potential influence of such latent semantic properties of target words on participants&#x00027; responses in the lexical decision task, a new group of participants was recruited online to evaluate the target words on their subjective frequency, valence, arousal, imageability, concreteness, and sensory experience using 7-point Likert scales (<italic>N</italic> = 38, 16 male, median age group: 40&#x02013;60 years). These variables were selected based on prior research highlighting their relevance in lexical processing and semantic cognition.</p>
<p>To assess how the recording version from which the target word was derived affected semantic ratings, we fitted a linear mixed-effects model for each of the six dimensions, including random intercepts for both the participants and the items. Because mixed-effects models account for inter-individual variability in a statistically robust manner, we did not conduct separate tests of internal consistency or reliability of participants&#x00027; ratings (<xref ref-type="bibr" rid="B19">Desrochers and Thompson, 2009</xref>; <xref ref-type="bibr" rid="B10">Bonin et al., 2003</xref>, <xref ref-type="bibr" rid="B11">2015</xref>).</p>
<p>Setting version M (music only) as the reference level, the results showed a statistically significant decrease in ratings for version L (lyrics only) across all dimensions except concreteness, after Bonferroni correction (&#x003B1; &#x0003D; 0.008). Participants consistently rated targets associated with version L lower than those from version M. These differences are illustrated in <xref ref-type="fig" rid="F1">Figure 1</xref>, with significant effects after alpha correction marked by asterisks. Interestingly, this systematic reduction in ratings for version L is somewhat unexpected, given that the target words themselves do not show any obvious semantic disparities (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Table S3</xref>).</p>
<fig position="float" id="F1">
<label>Figure 1</label>
<caption><p>Ratings of music version-derived (M) and lyrics version-derived (L) target words for each latent semantic property. Three asterisks (&#x0002A;&#x0002A;&#x0002A;) indicate a significant effect.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyg-17-1659797-g0001.tif">
<alt-text content-type="machine-generated">Boxplot graphic with six panels comparing scores of arousal, concreteness, frequency, imagery, SER, and valence for two groups labeled M and L indicating music version-derived and lyrics version-derived target word groups respectively, with p-values and significant differences as indicated by asterisks</alt-text>
</graphic>
</fig>
</sec>
<sec>
<label>2.3.3</label>
<title>Lexical properties of target words</title>
<p>Lexical factors such as word length and orthographic neighborhood size are well-established predictors of response times and accuracy in lexical decision tasks (<xref ref-type="bibr" rid="B32">Jiang et al., 2025</xref>; <xref ref-type="bibr" rid="B64">Stenneken et al., 2007</xref>; <xref ref-type="bibr" rid="B25">Grainger, 1990</xref>). Consequently, both experiential and linguistic information must be integrated to form a comprehensive representation of semantic and conceptual knowledge. In addition to semantic variables, we also gathered several objective lexical measures from the Lexique 3 database (<xref ref-type="bibr" rid="B47">New et al., 2004</xref>). These included lemma frequency in films, number of letters, number of homographs and homophones, number of syllables, and counts of phonological and orthographic neighbors. The selection of these variables was guided by prior research highlighting their relevance to lexical processing (<xref ref-type="bibr" rid="B27">Hino et al., 2013</xref>).</p>
<p>To assess potential differences between target words based on the recording version from which they were derived (M vs. L), we conducted a series of independent-samples <italic>t</italic>-tests. Because these analyses relied on publicly available scores, we could not account for random factors such as participant or item, as was done in other statistical models in this study. After Bonferroni correction for multiple comparisons, none of the differences reached statistical significance.</p>
</sec>
<sec>
<label>2.3.4</label>
<title>Pseudo-word targets</title>
<p>In lexical decision tasks, where pseudo-words conform to the orthographic and phonological rules of the target language, participants&#x00027; decisions rely primarily on the semantic features of real words (<xref ref-type="bibr" rid="B67">Syssau and Lax&#x000E9;n, 2012</xref>). In contrast, pseudo-words which violate these linguistic conventions are easily dismissed based on superficial features alone, such as letter combinations or pronounceability. Their rejection does not require access to lexical or semantic memory, leading to shallower processing. Following established procedures in the field (e.g., <xref ref-type="bibr" rid="B37">Kousta et al., 2009</xref>; <xref ref-type="bibr" rid="B67">Syssau and Lax&#x000E9;n, 2012</xref>), we constructed orthographically and phonologically legal pseudo-words by altering a single vowel in each target word (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Table S3</xref>). This method ensured that the resulting pseudo-words were pronounceable, orthographically legal and matched in length (number of syllables) with the real experimental prime.</p>
</sec>
</sec>
<sec>
<label>2.4</label>
<title>Procedure</title>
<p>We employed a cross-random-effects design using a lexical decision task, in which participants judged whether a given letter string was a real word or a pseudo-word. The primary objective was to compare reaction times (RTs) to real target words originating from excerpt versions M and L, presented in response to the ML recordings which were the primes. RTs were interpreted as an index of conceptual proximity between prime and target&#x02014;shorter RTs were taken to reflect a stronger semantic association.</p>
<p>We randomized the trial order for each participant. During each trial, a fixation cross appeared at the center of the screen where the target word would subsequently appear. Participants were instructed to fixate on the cross to minimize eye movements and enable faster responses. They were asked to decide as quickly as possible whether the displayed word was a real French word (pressing the &#x0201C;right&#x0201D; arrow key) or a pseudo-word (pressing the &#x0201C;left&#x0201D; arrow key). If a response took longer than 500 ms, an &#x0201C;Answer faster!&#x0201D; prompt appeared.</p>
<p>Following each trial, a burst of white noise was played. This served two purposes: to mask the auditory trace of the preceding excerpt and to function as a pacing mechanism. Participants could only proceed after pressing the space bar, ensuring attentiveness and allowing them to advance at their own pace. They were instructed to keep their right-hand fingers positioned on the &#x0201C;left&#x0201D; and &#x0201C;right&#x0201D; arrow keys throughout the task to enable rapid responses.</p>
<p>The estimated duration of the test was approximately 45 min. However, since participants progressed at their own pace, the actual durations varied. We provided headphones for in-lab participants and encouraged remote participants to use headphones as well.</p>
</sec>
<sec>
<label>2.5</label>
<title>Analysis</title>
<p>Our first analysis examined the central question of whether musical or lyrical meanings are accessed more quickly when participants are primed with a song. To assess this, we first log-transformed the RTs in the lexical decision task to reduce positive skew and approximate a normal distribution, in accordance with standard practice in the field (<xref ref-type="bibr" rid="B37">Kousta et al., 2009</xref>). Prior to statistical analysis, we removed outliers using the IQR method. <xref ref-type="fig" rid="F2">Figure 2</xref> presents violin plots summarizing the distribution of log-transformed RTs.</p>
<fig position="float" id="F2">
<label>Figure 2</label>
<caption><p>Logarithmically transformed reaction times to music version-derived (M) and lyrics version-derived (L) target words (outliers removed).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyg-17-1659797-g0002.tif">
<alt-text content-type="machine-generated">Violin plot comparing log reaction times for two conditions labeled M and L, with individual data points shown as black dots, distributions shaded pink and blue, and connecting lines indicating paired data.</alt-text>
</graphic>
</fig>
<p>Next, we accounted for a possible influence of the prime properties by controlling for their inner congruency using two separate scores: the Euclidean distance and the cosine similarity between their two constituting components, that is, the distance between versions M and L as recordings in the Euclidean distance score, and as conceptual associations in the cosine similarity score. We further controlled for a possible influence of target word properties, namely, the latent semantic properties (Section 2.3.2) and the lexical properties (Section 2.3.3). We also controlled for a possible influence of the semantic distance between prime and target and a potential interaction between these two effects. In order to account for individual differences between participants, we first explored the correlation between their preferences toward one or the other song component in song listening, as self-assessed in an <italic>ad hoc</italic> questionnaire, and their implicit bias, as indexed using their RTs. Second, we evaluated the possible influence of individual variability of participants by controlling for the effect of demographic variables.</p>
</sec>
</sec>
<sec sec-type="results" id="s3">
<label>3</label>
<title>Results</title>
<sec>
<label>3.1</label>
<title>Effect of stimulus version on affective ratings</title>
<p>To assess the effect of stimulus version on affective ratings, we fitted a maximum likelihood mixed-effects model with random intercepts for both participants and items (see <xref ref-type="fig" rid="F3">Figure 3</xref>). The reference condition was set to version ML. For valence ratings, version M was associated with significantly higher ratings compared to version ML (&#x003B2; &#x0003D; 0.54, <italic>p</italic> &#x0003D; 0.023; Bonferroni-corrected &#x003B1; &#x0003D; 0.025), while version L showed a weaker and non-significant effect (&#x003B2; &#x0003D; &#x02212;0.32, <italic>p</italic> &#x0003D; 0.175). For arousal ratings, while the trend remained the same, neither of the comparisons was significant (version M: &#x003B2; &#x0003D; 0.44, <italic>p</italic> &#x0003D; 0.089, version L: &#x003B2; &#x0003D; &#x02212;0.23, <italic>p</italic> &#x0003D; 0.3).</p>
<fig position="float" id="F3">
<label>Figure 3</label>
<caption><p>Latent affective properties of the three recording versions. Three asterisks (&#x0002A;&#x0002A;&#x0002A;) indicate a significant effect. <bold>(A)</bold> Valence ratings. <bold>(B)</bold> Arousal ratings.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyg-17-1659797-g0003.tif">
<alt-text content-type="machine-generated">Box plots illustrating valence and arousal ratings for three experimental versions labeled M, ML, and L. Panel A shows higher valence for version M compared to ML and L with a statistically significant difference marked by three asterisks between M and ML. Panel B depicts arousal ratings distributed similarly across the same versions, without marked statistical significance. Each box plot includes individual data points, medians, interquartile ranges, and some outliers.</alt-text>
</graphic>
</fig>
</sec>
<sec>
<label>3.2</label>
<title>Effect of target version</title>
<p>We fitted a linear mixed-effects model using maximum likelihood estimation to examine the effect of target version on RTs, while accounting for random intercepts for participants and primes. A random slope for participants was not included due to convergence issues and model overfitting.</p>
<p>The model demonstrated good fit (AIC = &#x02212;1,476.9, BIC = &#x02212;1,449.0, log-likelihood = 743.5). Random effects showed a variance of 0.034 for subject intercepts and 0.002 for the primes, while the residual variance was 0.025. The fixed-effect intercept was estimated at 6.456, while the effect of the version L-derived target words (lyrics associated concept) was estimated at 0.0294 (<italic>p</italic> &#x0003C; 0.001), reflecting a statistically significant increase in RTs relative to version M-derived target words (music associated concept), corresponding to 17 ms (see <xref ref-type="table" rid="T1">Table 1</xref>).</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Summary table of the influence of the latent semantic and lexical properties of target words on the effect of version.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left" rowspan="2"><bold>Model</bold></th>
<th valign="top" align="center" colspan="3">Reaction time (ms)</th>
<th valign="top" align="center" rowspan="2"><bold>95% CI (ms)</bold></th>
<th valign="top" align="center" rowspan="2"><bold>SE (ms)</bold></th>
<th valign="top" align="center" rowspan="2"><bold>df</bold></th>
<th valign="top" align="center" rowspan="2"><bold>t</bold></th>
<th valign="top" align="center" rowspan="2"><bold>p</bold></th>
</tr>
<tr>
<th valign="top" align="center"><bold>M</bold></th>
<th valign="top" align="center"><bold>L</bold></th>
<th valign="top" align="center">&#x00394;</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Basic</td>
<td valign="top" align="center">637</td>
<td valign="top" align="center">655</td>
<td valign="top" align="center">&#x0002B;17</td>
<td valign="top" align="center">646&#x02013;663</td>
<td valign="top" align="center">7.1</td>
<td valign="top" align="center">1989</td>
<td valign="top" align="center">3.91</td>
<td valign="top" align="center">&#x0003C; 0.001<sup>&#x0002A;</sup></td>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Arousal</td>
<td valign="top" align="center">646</td>
<td valign="top" align="center">664</td>
<td valign="top" align="center">&#x0002B;18</td>
<td valign="top" align="center">653&#x02013;675</td>
<td valign="top" align="center">8.7</td>
<td valign="top" align="center">1523</td>
<td valign="top" align="center">3.27</td>
<td valign="top" align="center">0.001<sup>&#x0002A;</sup></td>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Concreteness</td>
<td valign="top" align="center">615</td>
<td valign="top" align="center">635</td>
<td valign="top" align="center">&#x0002B;20</td>
<td valign="top" align="center">625&#x02013;645</td>
<td valign="top" align="center">8.2</td>
<td valign="top" align="center">1558</td>
<td valign="top" align="center">3.99</td>
<td valign="top" align="center">&#x0003C; 0.001<sup>&#x0002A;</sup></td>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Subjective Frequency</td>
<td valign="top" align="center">687</td>
<td valign="top" align="center">701</td>
<td valign="top" align="center">&#x0002B;14</td>
<td valign="top" align="center">687&#x02013;714</td>
<td valign="top" align="center">9.9</td>
<td valign="top" align="center">418</td>
<td valign="top" align="center">2.06</td>
<td valign="top" align="center">0.040</td>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Imageability</td>
<td valign="top" align="center">669</td>
<td valign="top" align="center">686</td>
<td valign="top" align="center">&#x0002B;17</td>
<td valign="top" align="center">674&#x02013;698</td>
<td valign="top" align="center">8.9</td>
<td valign="top" align="center">1247</td>
<td valign="top" align="center">2.88</td>
<td valign="top" align="center">0.004</td>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Sensory Experience</td>
<td valign="top" align="center">653</td>
<td valign="top" align="center">671</td>
<td valign="top" align="center">&#x0002B;18</td>
<td valign="top" align="center">660&#x02013;682</td>
<td valign="top" align="center">8.9</td>
<td valign="top" align="center">1167</td>
<td valign="top" align="center">3.06</td>
<td valign="top" align="center">0.002<sup>&#x0002A;</sup></td>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Valence</td>
<td valign="top" align="center">645</td>
<td valign="top" align="center">662</td>
<td valign="top" align="center">&#x0002B;17</td>
<td valign="top" align="center">650&#x02013;674</td>
<td valign="top" align="center">9.6</td>
<td valign="top" align="center">1478</td>
<td valign="top" align="center">2.78</td>
<td valign="top" align="center">0.006</td>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Film Frequency</td>
<td valign="top" align="center">722</td>
<td valign="top" align="center">741</td>
<td valign="top" align="center">&#x0002B;19</td>
<td valign="top" align="center">731&#x02013;752</td>
<td valign="top" align="center">0.01</td>
<td valign="top" align="center">1920</td>
<td valign="top" align="center">3.70</td>
<td valign="top" align="center">&#x0003C; 0.001<sup>&#x0002A;</sup></td>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Letters</td>
<td valign="top" align="center">602</td>
<td valign="top" align="center">619</td>
<td valign="top" align="center">&#x0002B;16</td>
<td valign="top" align="center">610&#x02013;628</td>
<td valign="top" align="center">0.01</td>
<td valign="top" align="center">1929</td>
<td valign="top" align="center">3.69</td>
<td valign="top" align="center">&#x0003C; 0.001<sup>&#x0002A;</sup></td>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Homographs</td>
<td valign="top" align="center">651</td>
<td valign="top" align="center">670</td>
<td valign="top" align="center">&#x0002B;19</td>
<td valign="top" align="center">661&#x02013;680</td>
<td valign="top" align="center">0.01</td>
<td valign="top" align="center">1929</td>
<td valign="top" align="center">4.10</td>
<td valign="top" align="center">&#x0003C; 0.001<sup>&#x0002A;</sup></td>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Homophones</td>
<td valign="top" align="center">644</td>
<td valign="top" align="center">664</td>
<td valign="top" align="center">&#x0002B;20</td>
<td valign="top" align="center">655&#x02013;674</td>
<td valign="top" align="center">0.01</td>
<td valign="top" align="center">1929</td>
<td valign="top" align="center">4.29</td>
<td valign="top" align="center">&#x0003C; 0.001<sup>&#x0002A;</sup></td>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Orth. Neighbors</td>
<td valign="top" align="center">640</td>
<td valign="top" align="center">659</td>
<td valign="top" align="center">&#x0002B;19</td>
<td valign="top" align="center">650&#x02013;669</td>
<td valign="top" align="center">0.01</td>
<td valign="top" align="center">1929</td>
<td valign="top" align="center">4.09</td>
<td valign="top" align="center">&#x0003C; 0.001<sup>&#x0002A;</sup></td>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Phon. Neighbors</td>
<td valign="top" align="center">641</td>
<td valign="top" align="center">660</td>
<td valign="top" align="center">&#x0002B;19</td>
<td valign="top" align="center">650&#x02013;669</td>
<td valign="top" align="center">0.01</td>
<td valign="top" align="center">1929</td>
<td valign="top" align="center">3.99</td>
<td valign="top" align="center">&#x0003C; 0.001<sup>&#x0002A;</sup></td>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Syllables</td>
<td valign="top" align="center">572</td>
<td valign="top" align="center">590</td>
<td valign="top" align="center">&#x0002B;18</td>
<td valign="top" align="center">582&#x02013;599</td>
<td valign="top" align="center">0.01</td>
<td valign="top" align="center">1924</td>
<td valign="top" align="center">4.43</td>
<td valign="top" align="center">&#x0003C; 0.001<sup>&#x0002A;</sup></td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>The estimates show LogRTs transformed back into milliseconds for better interpretation. Significant effects after Bonferroni correction (<italic>p</italic> &#x0003C; 0.004) are marked with an asterisk (<sup>&#x0002A;</sup>). The <italic>p</italic> value column indicates whether the effect of version remains significant after controlling for different other variables.</p>
</table-wrap-foot>
</table-wrap>
<p>To validate the contribution of the target version predictor, we also fitted a reduced model without this variable. A likelihood ratio test between the two models confirmed a significant difference (<inline-formula><mml:math id="M1"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mn>16</mml:mn><mml:mo>.</mml:mo><mml:mn>756</mml:mn><mml:mo>,</mml:mo><mml:mstyle class="text"><mml:mtext class="textit" mathvariant="italic">p</mml:mtext></mml:mstyle><mml:mo>&#x0003C;</mml:mo><mml:mn>0</mml:mn><mml:mo>.</mml:mo><mml:mn>001</mml:mn></mml:math></inline-formula>), underscoring the explanatory power of target version.</p>
<p>The marginal <inline-formula><mml:math id="M2"><mml:msubsup><mml:mrow><mml:mi>R</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mn>0</mml:mn><mml:mo>.</mml:mo><mml:mn>0036</mml:mn></mml:math></inline-formula> and conditional <inline-formula><mml:math id="M3"><mml:msubsup><mml:mrow><mml:mi>R</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mn>0</mml:mn><mml:mo>.</mml:mo><mml:mn>596</mml:mn></mml:math></inline-formula>, calculated using the r.squaredGLMM function from the MuMIn package (<xref ref-type="bibr" rid="B4">Barto&#x00144;, 2024</xref>), indicated that while the fixed effect accounted for a small portion of the variance, the random effects contributed substantially. Cohen&#x00027;s <italic>d</italic>, calculated manually, was 0.57, indicating a moderate effect size.</p>
<p>We estimated confidence intervals using the confint function from the lme4 package (<xref ref-type="bibr" rid="B5">Bates et al., 2015</xref>). The 95% CI for the intercept (version M-derived target words) ranged from 6.40 to 6.51, reflecting a precise estimate. For version L-derived target words, the 95% CI ranged from 0.015 to 0.043, suggesting a small but reliable positive effect, as the interval does not include zero.</p>
</sec>
<sec>
<label>3.3</label>
<title>Effect of inner congruency of the primes</title>
<p>To assess the possible influence of prime properties on the semantic recognition of targets, we calculated its inner semantic congruency score based on each excerpt&#x00027;s position in a two-dimensional space defined by valence and arousal ratings (see Section 2.2.1). The effect of inner congruency was not statistically significant (<italic>p</italic> &#x0003D; 0.156). Similarly, we controlled for the congruency between M and L versions of the prime excerpts using the cosine similarity scores (Section 2.3.1). Adding this predictor did not yield a significant effect either (<italic>p</italic> &#x0003D; 0.867).</p>
<p>We further tested a congruency hypothesis from earlier studies, including <xref ref-type="bibr" rid="B1">Ali and Peynircio&#x0011F;lu (2006)</xref>, which reported that emotionally congruent combinations of lyrics and melody intensified perceived emotion. In our study, we used Euclidean distance in valence&#x02013;arousal space as a proxy for congruence between excerpt version M and L and examined its effect on excerpt version ML ratings. While Euclidean distance did not predict valence ratings (<italic>p</italic> &#x0003D; 0.839; <italic>R</italic><sup>2</sup><italic>m</italic> &#x0003D; 0.0018), it significantly predicted arousal ratings (<italic>p</italic> &#x0003D; 0.0304; <italic>R</italic><sup>2</sup><italic>m</italic> &#x0003D; 0.188), indicating that greater incongruence between music and lyrics was associated with increased emotional arousal.</p>
</sec>
<sec>
<label>3.4</label>
<title>Effect of latent semantic properties of the target words on RTs</title>
<p>To account for the semantic characteristics of the target words, we initially attempted to fit a full mixed-effects model including all latent semantic properties as fixed effects, along with target version and random intercepts for participants and primes. However, due to multicollinearity among the semantic variables, we instead ran separate models for each one, using version M-derived target words as the reference level.</p>
<p>Across all models, the estimated effect of target version changed slightly but remained statistically significant in all but the models controlling for subjective frequency, imageability and valence (see <xref ref-type="table" rid="T1">Table 1</xref> for the estimates for the effect of version in the basic model and the models with additional predictors). To evaluate the impact of these three predictors, we conducted ANOVA model comparisons showing that none of those models significantly improved model fit (see <xref ref-type="table" rid="T2">Table 2</xref>).</p>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Model comparisons (likelihood ratio tests) assessing the effect of additional predictors beyond Version.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Model</bold></th>
<th valign="top" align="center"><bold>AIC</bold></th>
<th valign="top" align="center"><bold>BIC</bold></th>
<th valign="top" align="center"><bold>logLik</bold></th>
<th valign="top" align="center"><bold>&#x003C7;<sup>2</sup></bold></th>
<th valign="top" align="center"><bold>df</bold></th>
<th valign="top" align="center"><bold>p</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Basic</td>
<td valign="top" align="center">&#x02013;1,477.0</td>
<td valign="top" align="center">&#x02013;1,449.0</td>
<td valign="top" align="center">734.47</td>
<td/>
<td/>
<td/>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Subjective frequency</td>
<td valign="top" align="center">&#x02013;1,476.6</td>
<td valign="top" align="center">&#x02013;1,443.0</td>
<td valign="top" align="center">744.29</td>
<td valign="top" align="center">1.63</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">0.202</td>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Imageability</td>
<td valign="top" align="center">&#x02013;1,475.7</td>
<td valign="top" align="center">&#x02013;1,442.1</td>
<td valign="top" align="center">743.85</td>
<td valign="top" align="center">0.75</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">0.388</td>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Valence</td>
<td valign="top" align="center">&#x02013;1,475.2</td>
<td valign="top" align="center">&#x02013;1,441.7</td>
<td valign="top" align="center">743.62</td>
<td valign="top" align="center">0.29</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">0.588</td>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Film frequency</td>
<td valign="top" align="center">&#x02013;1,490.5</td>
<td valign="top" align="center">&#x02013;1,457.0</td>
<td valign="top" align="center">751.26</td>
<td valign="top" align="center">15.57</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">&#x0003C; 0.001<sup>&#x0002A;</sup></td>
</tr>
<tr>
<td valign="top" align="left">&#x0002B; Syllables</td>
<td valign="top" align="center">&#x02013;1,490.3</td>
<td valign="top" align="center">&#x02013;1,456.8</td>
<td valign="top" align="center">751.15</td>
<td valign="top" align="center">15.35</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">&#x0003C; 0.001<sup>&#x0002A;</sup></td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>Significant effects after Bonferroni correction (<italic>p</italic> &#x0003C; 0.01) are marked with an asterisk (<sup>&#x0002A;</sup>). The <italic>p</italic> value column indicates whether the model with the added predictor was significantly improved over the basic model.</p>
</table-wrap-foot>
</table-wrap>
<p>Overall, none of the latent semantic predictors accounted for additional variance in RTs beyond that explained by target version. The RT difference between versions M and L thus remained robust even when controlling for these semantic factors.</p>
</sec>
<sec>
<label>3.5</label>
<title>Effect of lexical properties of target words on RTs</title>
<p>To evaluate the impact of the lexical properties of the target words on reaction times, we fitted mixed-effects models with random intercepts for participants and primes. The target version effect remained statistically significant in all models (see <xref ref-type="table" rid="T1">Table 1</xref>).</p>
<p>Only two models yielded additional significant effects, namely lemma frequency in films and number of syllables. Firstly, the lemma frequency in films had a significant negative effect on RTs (<italic>t</italic><sub>(73)</sub> &#x0003D; &#x02212;4.515, <italic>p</italic> &#x0003C; 0.001), indicating faster responses to more frequent words (<italic>R</italic><sup>2</sup><italic>m</italic> &#x0003D; 0.014, <italic>R</italic><sup>2</sup><italic>c</italic> &#x0003D; 0.59). Model comparisons confirmed that including the frequency in films predictor significantly improved model fit (see <xref ref-type="table" rid="T2">Table 2</xref>). Type III ANOVA indicated that both frequency in films and version predictors contributed significantly, with the frequency in films exerting a slightly greater impact (<italic>F</italic> = 20.39 for the frequency in films vs. <italic>F</italic> = 13.39 for target version), consistent with prior psycholinguistic findings on word frequency.</p>
<p>Secondly, the number of syllables had a significant positive effect on RTs (<italic>t</italic><sub>(29)</sub> &#x0003D; 4.383, <italic>p</italic> &#x0003C; 0.001), indicating slower responses to words with more syllables. Type III ANOVA revealed that number of syllables accounted for variance comparable to the target version effect (<italic>F</italic> = 19.20 vs. 19.60). Model comparisons confirmed improved fit with this predictor (see <xref ref-type="table" rid="T2">Table 2</xref>).</p>
</sec>
<sec>
<label>3.6</label>
<title>Effect of semantic distance between primes and target words</title>
<p>In order to control for a possible influence of the semantic distance between primes, that is, the ML recordings, and target words, that is, concepts derived from M or L recordings, we calculated their Euclidean distance using the valence and arousal scores obtained from pre-testing (for primes see Section 2.2.1, for target words see Section 2.3.2). A paired t-test showed that the difference between the two distances, that is, ML prime to version L-derived target word vs. ML prime to version M-derived target word, was not statistically significant (<italic>t</italic><sub>(24)</sub> &#x0003D; 1.82, <italic>p</italic> &#x0003D; 0.081).</p>
<p>Adding the semantic distance predictor to the model revealed its significant influence on the RTs (<italic>t</italic><sub>(523)</sub> &#x0003D; 2.058, <italic>p</italic> &#x0003D; 0.04). The further apart the affective profile of the target word is from the prime, the longer it takes participants to respond, suggesting that emotional distance between the prime and the target makes processing less efficient. The type III ANOVA showed that the effect of version remained statistically significant even when controlling for the semantic distance (<italic>F</italic><sub>(1, 1999.81)</sub> &#x0003D; 19.16, <italic>p</italic> &#x0003C; 0.001). Given that target words derived from version M recordings tended to be significantly higher rated on valence and arousal than target words derived from version L recordings (see Section 2.3.2), we also checked for a potential interaction between the effect of target version and semantic distance. The interaction was significant as confirmed by type III ANOVA (<italic>F</italic><sub>(1, 1679.89)</sub> &#x0003D; 6.13, <italic>p</italic> &#x0003D; 0.013), suggesting that the effect of semantic distance on RTs differs between version M- and version L-derived target words.</p>
<p>To follow up the significant interaction between target version and semantic distance on RTs, we conducted a simple slopes analysis using estimated marginal trends from the interactions package (<xref ref-type="bibr" rid="B41">Long, 2024</xref>). The analysis revealed that semantic distance had a significant positive effect on RTs in version M (b = 0.037, 95% CI [0.0135, 0.0606]), indicating that participants responded faster to the version M-derived target words when they were affectively closer to the primes (ML recordings). In contrast, no such effect was observed for version L-derived target words (b = &#x02212;0.0009, 95% CI [&#x02212;0.027, 0.025]), suggesting that emotional distance from the primes did not influence reaction times when the target word was derived from a lyrics-only recording (see <xref ref-type="fig" rid="F4">Figure 4</xref>).</p>
<fig position="float" id="F4">
<label>Figure 4</label>
<caption><p>Result of the simple slopes analysis of the interaction between the effect of version and Euclidean distance. Model-based 95% confidence intervals show the uncertainty around predicted LogRTs across Distance values, separately by Version, reflecting the expected range for the true mean given the model and data.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyg-17-1659797-g0004.tif">
<alt-text content-type="machine-generated">Line chart illustrating predicted log response time as a function of Euclidean distance from prime to target. Version L shows a nearly flat trend, while version M exhibits an upward slope. Shaded regions represent confidence intervals.</alt-text>
</graphic>
</fig>
</sec>
<sec>
<label>3.7</label>
<title>Implicit and explicit bias in song perception</title>
<p>Participants&#x00027; explicit biases were measured using responses from the self-report questionnaire (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Table S1</xref>). Preference scores were mean-centered, such that positive values indicated a bias toward M and negative values toward L. To assess implicit bias, we computed the difference in participants&#x00027; log-transformed RTs between version M- and version L-derived target words. This RT difference served as an index of automatic, implicit preference for one component over the other.</p>
<p>A Pearson correlation between explicit and implicit bias measures revealed a weak but positive association (<italic>r</italic> &#x0003D; 0.269, <italic>p</italic> &#x0003D; 0.059), suggesting that participants who explicitly preferred lyrics also tended to show slower responses to version L-derived target words, although the relationship was modest (see <xref ref-type="fig" rid="F5">Figure 5</xref>).</p>
<fig position="float" id="F5">
<label>Figure 5</label>
<caption><p>Correlation between explicit and implicit bias measures.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyg-17-1659797-g0005.tif">
<alt-text content-type="machine-generated">Scatterplot showing the relationship between explicit preference bias on the x-axis and implicit reaction time bias on the y-axis, with a trend line indicating a positive correlation (r equals 0.27, p equals 0.059).</alt-text>
</graphic>
</fig>
</sec>
<sec>
<label>3.8</label>
<title>Effect of demographic variables</title>
<p>Finally, we explored whether demographic variables (sex, age, education level, musical experience, and music liking) could explain additional variance by incorporating them as predictors. None of these variables showed a statistically significant effect, indicating that individual differences of participants did not account for more variance than the main condition effect.</p>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<label>4</label>
<title>Discussion</title>
<p>While previous research has suggested a tight integration of melody and lyrics in priming paradigms (<xref ref-type="bibr" rid="B24">Gordon et al., 2010</xref>), our findings indicate that musical components exert a stronger influence on conceptual processing than lyrical ones. By examining how each modality contributes to semantic priming, we add to the ongoing discussion of whether music and language engage integrated or independent cognitive systems. The observed processing advantage for music suggests that listeners access the semantic content of musical information more quickly than that of lyrics. This effect persisted even after controlling for a wide range of lexical and semantic characteristics of the target words, indicating that it cannot be explained by surface-level word properties alone (<xref ref-type="bibr" rid="B66">Syssau and Font, 2005</xref>; <xref ref-type="bibr" rid="B64">Stenneken et al., 2007</xref>). These results highlight music&#x00027;s capacity to evoke conceptual meaning independently and efficiently, and point to a potential asymmetry in how music and language contribute to the semantic interpretation of song.</p>
<p>Our findings align with studies that highlight the conceptual and affective richness of music (<xref ref-type="bibr" rid="B1">Ali and Peynircio&#x0011F;lu, 2006</xref>; <xref ref-type="bibr" rid="B39">K&#x000FC;ssner and Eerola, 2019</xref>; <xref ref-type="bibr" rid="B42">Margulis et al., 2022</xref>; <xref ref-type="bibr" rid="B35">Koelsch, 2011</xref>) and provide further evidence that music is not a mere carrier of affect but can independently generate meaningful semantic associations. While earlier studies have merely debated whether music or lyrics dominate affective responses in song (<xref ref-type="bibr" rid="B65">Stratton and Zalanowski, 1994</xref>; <xref ref-type="bibr" rid="B62">Sousou, 1997</xref>; <xref ref-type="bibr" rid="B13">Brattico et al., 2011</xref>), our results demonstrate a clear behavioral advantage for music-based conceptual processing in song interpretation.</p>
<p>While earlier studies have often focused on structural or perceptual aspects of the music&#x02013;language interface, our results suggest that a degree of independence also extends to the semantic level. However, affective scores of the recordings collected in a pre-test session point to partial convergence: music and lyrics together produced emotional responses that were more moderate than either component alone (see Section 2.2.1). Strikingly, the presence of lyrics consistently reduced both the intensity and positivity of emotional responses. Overall, version M tended to receive higher ratings in both valence and arousal, while version L was rated lower on these dimensions compared to the stimulus that combined both music and lyrics. These findings support previous results by <xref ref-type="bibr" rid="B28">Ilie and Thompson (2006)</xref>, who reported that purely musical stimuli elicited more positive and more intense emotional responses than vocal or spoken stimuli. Our results suggest that the presence of lyrics may dampen both the emotional intensity and the perceived positivity of musical excerpts. This aligns with prior work suggesting that lyrics can increase perceived complexity and dampen enjoyment (<xref ref-type="bibr" rid="B23">Gfeller and Coffman, 1991</xref>), and that melodies alone evoke stronger emotional responses than when paired with lyrics (<xref ref-type="bibr" rid="B1">Ali and Peynircio&#x0011F;lu, 2006</xref>). One possibility is that lyrics add cognitive and emotional complexity, or reduce emotional clarity especially if comprehension is hindered. Indeed, <xref ref-type="bibr" rid="B33">Johnson et al. (2013)</xref> found that sung lyrics, particularly melismatic ones, impair verbal recognition compared to spoken text. That lyrics may weaken music&#x00027;s emotional impact is counterintuitive and merits further exploration.</p>
<p>Importantly, our study addressed limitations in prior research by using unfamiliar song material, minimizing the influence of personal memories and genre expectations (<xref ref-type="bibr" rid="B18">Cuddy et al., 2017</xref>; <xref ref-type="bibr" rid="B30">Jakubowski et al., 2020</xref>; <xref ref-type="bibr" rid="B38">Krumhansl and Zupnick, 2013</xref>; <xref ref-type="bibr" rid="B50">Parks and Clancy Dollinger, 2014</xref>). We assessed participants&#x00027; familiarity with the musical work of Boris Vian using a demographic questionnaire, which showed that only 10% of participants were familiar with it. Critically, to the best of our knowledge, some of the songs used in this study were recorded for the first time by our team, meaning that prior exposure is unlikely. At the same time, the focus on the French language represents one of the strengths of this study, as it helps counterbalance the over-representation of English in the cognitive sciences (<xref ref-type="bibr" rid="B7">Blasi et al., 2022</xref>). On the other hand, despite efforts to enhance ecological validity by using real music excerpts, the experimental setting and the laboratory presentation of song snippets differ substantially from real-life listening experiences, and the results should therefore be interpreted with caution.</p>
<p>The collection of affective scores of each type of stimuli and carefully controlled lexical and semantic target properties strengthen the internal validity of our findings. Additionally, our methodological innovations, such as computing inner congruency of song excerpts using Euclidean distance in affective space and cosine similarity in semantic space, provide a novel approach to quantifying semantic alignment in multimodal stimuli. Crucially, we explored broader conceptual processing and thereby investigated semantic aspects beyond emotion processing in song (<xref ref-type="bibr" rid="B45">Mori and Iwanaga, 2013</xref>; <xref ref-type="bibr" rid="B70">Vidas et al., 2020</xref>; <xref ref-type="bibr" rid="B13">Brattico et al., 2011</xref>). While previous studies have indicated that instrumental music can evoke semantic concepts beyond emotion (<xref ref-type="bibr" rid="B54">Proverbio et al., 2022</xref>; <xref ref-type="bibr" rid="B36">Koelsch et al., 2004</xref>), here we show that such concepts may be processed preferentially over concepts evoked by lyrics at the same time.</p>
<p>Building on prior work (e.g., <xref ref-type="bibr" rid="B1">Ali and Peynircio&#x0011F;lu, 2006</xref>), we further tested a congruency hypothesis by operationalizing music&#x02013;lyrics congruence as Euclidean distance in valence&#x02013;arousal space between excerpt versions M and L. We examined whether this distance predicted ML ratings and found that congruence did not explain valence judgments, whereas greater music&#x02013;lyrics incongruence was associated with heightened emotional arousal. This contradicts <xref ref-type="bibr" rid="B1">Ali and Peynircio&#x0011F;lu (2006)</xref>&#x00027;s findings and suggests that emotional dissonance may amplify arousal, rather than diminish it. However, these findings are consistent with those of <xref ref-type="bibr" rid="B65">Stratton and Zalanowski (1994)</xref>, who found that combining pleasant music with negative lyrics can intensify emotional impact through a cognitive dissonance mechanism, requiring listeners to reconcile conflicting cues. This effort to resolve incongruity may enhance emotional engagement.</p>
<p>Furthermore, we found that the semantic distance between the affective profiles of the prime and the target&#x02014;expressed as Euclidean distance&#x02014;significantly predicted reaction times, but only for version M-derived targets. Greater affective distance resulted in longer response times, indicating that affective congruence facilitates lexical access when the target word was conceptually related to the musical aspect of the prime. The absence of this effect when the target word was conceptually related to the lyrics of the prime suggests that lyrics may not be processed with the same level of affective integration, or that their affective content is less salient when stripped of musical context. This dissociation provides new insight into how emotional and conceptual processing may differ between musical and linguistic modalities within song (<xref ref-type="bibr" rid="B1">Ali and Peynircio&#x0011F;lu, 2006</xref>). Music-derived targets appear more sensitive to affective mismatch with the primes, further supporting the idea that music conveys rich affective meaning that interacts dynamically with subsequent cognitive processing (<xref ref-type="bibr" rid="B9">Boltz, 2001</xref>). This is in line with embodied and grounded cognition theories, which propose that affective states shape conceptual processing (<xref ref-type="bibr" rid="B21">Fernandino et al., 2022</xref>). This again provides support to studies that have found music and lyrics processing independent (<xref ref-type="bibr" rid="B6">Besson et al., 1998</xref>).</p>
<p>We also found that participants&#x00027; explicit preferences for music or lyrics, as assessed through <italic>ad hoc</italic> questionnaire items, showed a weak positive correlation with their implicit processing bias in reaction times. While not statistically significant, this trend hints at an individual difference factor that could be explored in future research, especially in studies that aim to personalize or tailor music-based interventions.</p>
<p>In sum, our findings challenge the assumption that lyrics dominate song interpretation (<xref ref-type="bibr" rid="B65">Stratton and Zalanowski, 1994</xref>; <xref ref-type="bibr" rid="B52">Peynircio&#x0011F;lu et al., 1998</xref>; <xref ref-type="bibr" rid="B61">Serafine et al., 1984</xref>) and reveal the complex, sometimes unexpected interplay between music and language. Future work should further explore the mechanisms of emotional blending and processing interference between modalities, especially under naturalistic listening conditions. Taken together, these results provide compelling evidence that musical information can exert a stronger and more immediate effect on conceptual processing than lyrics. While not dismissing the role of language in song, our findings indicate that music and language engage overlapping but distinct interpretative pathways, and that their integration in song may be more asymmetrical than previously assumed.</p>
<p>Lastly, it should be noted that although we chose the stimulus material to eliminate what we perceived to be limitations in previous studies, the selected stimuli present their own limitations regarding the generalizability of our findings: they are from a particular genre, in a particular language, composed and written by a particular person, and perceived by particular listeners. Future research should therefore examine how familiarity, genre and individual listener characteristics modulate the balance between music and lyrics in semantic processing.</p>
</sec>
<sec sec-type="conclusions" id="s5">
<label>5</label>
<title>Conclusion</title>
<p>This study provides compelling evidence that music can independently and efficiently drive conceptual processing in song, often more so than lyrics. By demonstrating a consistent reaction time advantage for music-based targets&#x02014;even when controlling for semantic and lexical factors&#x02014;we highlight a meaningful asymmetry in how musical and linguistic components contribute to song interpretation. Our findings challenge assumptions of lyrical dominance and support the view that music and language, while often integrated in song, engage distinct interpretive processes. These results underscore music&#x00027;s capacity to convey rich semantic and affective content, advancing our understanding of song perception, opening new avenues for exploring the interplay of affect, semantics, and modality in human cognition, and offering a foundation for future research into the complex dynamics of multimodal communication.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec sec-type="ethics-statement" id="s7">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the Ethics Review Board of Masaryk University. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec sec-type="author-contributions" id="s8">
<title>Author contributions</title>
<p>AK: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Writing &#x02013; original draft, Writing &#x02013; review &#x00026; editing. A-XC: Conceptualization, Supervision, Writing &#x02013; original draft, Writing &#x02013; review &#x00026; editing.</p>
</sec>
<ack><title>Acknowledgments</title><p>Part of the work was supported by the research infrastructure HUME Lab Experimental Humanities Laboratory, Faculty of Arts, Masaryk University, and supervised by Christophe G. L. Cusimano.</p>
</ack>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s10">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s11">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="s12">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fpsyg.2026.1659797/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fpsyg.2026.1659797/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ali</surname> <given-names>S. O.</given-names></name> <name><surname>Peynircio&#x0011F;lu</surname> <given-names>Z. F.</given-names></name></person-group> (<year>2006</year>). <article-title>Songs and emotions: are lyrics and melodies equal partners?</article-title> <source>Psychol. Music</source> <volume>34</volume>, <fpage>511</fpage>&#x02013;<lpage>534</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0305735606067168</pub-id></mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Asaridou</surname> <given-names>S. S.</given-names></name> <name><surname>McQueen</surname> <given-names>J. M.</given-names></name></person-group> (<year>2013</year>). <article-title>Speech and music shape the listening brain: Evidence for shared domain-general mechanisms</article-title>. <source>Front. Psychol</source>. <volume>4</volume>:<fpage>321</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2013.00321</pub-id><pub-id pub-id-type="pmid">23761776</pub-id></mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Barradas</surname> <given-names>G. T.</given-names></name> <name><surname>Sakka</surname> <given-names>L. S.</given-names></name></person-group> (<year>2022</year>). <article-title>When words matter: a cross-cultural perspective on lyrics and their relationship to musical emotions</article-title>. <source>Psychol. Music</source> <volume>50</volume>, <fpage>650</fpage>&#x02013;<lpage>669</lpage>. doi: <pub-id pub-id-type="doi">10.1177/03057356211013390</pub-id></mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="web"><person-group person-group-type="author"><name><surname>Barto&#x00144;</surname> <given-names>K.</given-names></name></person-group> (<year>2024</year>). <source>MuMIn: Multi-Model Inference</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://cran.r-project.org/package=MuMIn">https://cran.r-project.org/package=MuMIn</ext-link> (Accessed March 12, 2025).</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bates</surname> <given-names>D.</given-names></name> <name><surname>M&#x000E4;chler</surname> <given-names>M.</given-names></name> <name><surname>Bolker</surname> <given-names>B.</given-names></name> <name><surname>Walker</surname> <given-names>S.</given-names></name></person-group> (<year>2015</year>). <article-title>Fitting linear mixed-effects models using lme4</article-title>. <source>J. Stat. Softw</source>. <volume>67</volume>, <fpage>1</fpage>&#x02013;<lpage>48</lpage>. doi: <pub-id pub-id-type="doi">10.18637/jss.v067.i01</pub-id></mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Besson</surname> <given-names>M.</given-names></name> <name><surname>Fa&#x000EF;ta</surname> <given-names>F.</given-names></name> <name><surname>Peretz</surname> <given-names>I.</given-names></name> <name><surname>Bonnel</surname> <given-names>A.-M.</given-names></name> <name><surname>Requin</surname> <given-names>J.</given-names></name></person-group> (<year>1998</year>). <article-title>Singing in the brain: Independence of lyrics and tunes</article-title>. <source>Psychol. Sci</source>. <volume>9</volume>, <fpage>494</fpage>&#x02013;<lpage>498</lpage>. doi: <pub-id pub-id-type="doi">10.1111/1467-9280.00091</pub-id></mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Blasi</surname> <given-names>D. E.</given-names></name> <name><surname>Henrich</surname> <given-names>J.</given-names></name> <name><surname>Adamou</surname> <given-names>E.</given-names></name> <name><surname>Kemmerer</surname> <given-names>D.</given-names></name> <name><surname>Majid</surname> <given-names>A.</given-names></name></person-group> (<year>2022</year>). <article-title>Over-reliance on English hinders cognitive science</article-title>. <source>Trends Cogn. Sci</source>. <volume>26</volume>, <fpage>1153</fpage>&#x02013;<lpage>1170</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.tics.2022.09.015</pub-id><pub-id pub-id-type="pmid">36253221</pub-id></mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bolognesi</surname> <given-names>M.</given-names></name> <name><surname>Pilgram</surname> <given-names>R.</given-names></name> <name><surname>van den Heerik</surname> <given-names>R.</given-names></name></person-group> (<year>2017</year>). <article-title>Reliability in content analysis: the case of semantic feature norms classification</article-title>. <source>Behav. Res. Methods</source> <volume>49</volume>, <fpage>1984</fpage>&#x02013;<lpage>2001</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13428-016-0838-6</pub-id><pub-id pub-id-type="pmid">28039680</pub-id></mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Boltz</surname> <given-names>M. G.</given-names></name></person-group> (<year>2001</year>). <article-title>Musical soundtracks as a schematic influence on the cognitive processing of filmed events</article-title>. <source>Music Percept</source>. <volume>18</volume>, <fpage>427</fpage>&#x02013;<lpage>454</lpage>. doi: <pub-id pub-id-type="doi">10.1525/mp.2001.18.4.427</pub-id></mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bonin</surname> <given-names>P.</given-names></name> <name><surname>M&#x000E9;ot</surname> <given-names>A.</given-names></name> <name><surname>Aubert</surname> <given-names>L.</given-names></name> <name><surname>Malardier</surname> <given-names>N.</given-names></name> <name><surname>Niedenthal</surname> <given-names>P. M.</given-names></name> <name><surname>Capelle-Toczek</surname> <given-names>M.-C.</given-names></name></person-group> (<year>2003</year>). <article-title>Normes de concr&#x000E9;tude, de valeur d&#x00027;imagerie, de fr&#x000E9;quence subjective et de valence &#x000E9;motionnelle pour 866 mots</article-title>. <source>L&#x00027;ann&#x000E9;e Psychol</source>. <volume>103</volume>, <fpage>655</fpage>&#x02013;<lpage>694</lpage>. doi: <pub-id pub-id-type="doi">10.3406/psy.2003.29658</pub-id></mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bonin</surname> <given-names>P.</given-names></name> <name><surname>M&#x000E9;ot</surname> <given-names>A.</given-names></name> <name><surname>Ferrand</surname> <given-names>L.</given-names></name> <name><surname>Buga&#x000EF;ska</surname> <given-names>A.</given-names></name></person-group> (<year>2015</year>). <article-title>Sensory experience ratings (SERs) for 1,659 French words: relationships with other psycholinguistic variables and visual word recognition</article-title>. <source>Behav. Res. Methods</source> <volume>47</volume>, <fpage>813</fpage>&#x02013;<lpage>825</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13428-014-0503-x</pub-id><pub-id pub-id-type="pmid">24993636</pub-id></mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bonnel</surname> <given-names>A. M.</given-names></name> <name><surname>Faita</surname> <given-names>F.</given-names></name> <name><surname>Peretz</surname> <given-names>I.</given-names></name> <name><surname>Besson</surname> <given-names>M.</given-names></name></person-group> (<year>2001</year>). <article-title>Divided attention between lyrics and tunes of operatic songs: evidence for independent processing</article-title>. <source>Percept. Psychophys</source>. <volume>63</volume>, <fpage>1201</fpage>&#x02013;<lpage>1213</lpage>. doi: <pub-id pub-id-type="doi">10.3758/BF03194534</pub-id><pub-id pub-id-type="pmid">11766944</pub-id></mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Brattico</surname> <given-names>E.</given-names></name> <name><surname>Alluri</surname> <given-names>V.</given-names></name> <name><surname>Bogert</surname> <given-names>B.</given-names></name> <name><surname>Jacobsen</surname> <given-names>T.</given-names></name> <name><surname>Vartiainen</surname> <given-names>N.</given-names></name> <name><surname>Nieminen</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>A functional MRI study of happy and sad emotions in music with and without lyrics</article-title>. <source>Front. Psychol</source>. <volume>2</volume>:<fpage>308</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2011.00308</pub-id><pub-id pub-id-type="pmid">22144968</pub-id></mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Bregman</surname> <given-names>A. S.</given-names></name></person-group> (<year>1990</year>). <source>Auditory Scene Analysis: The Perceptual Organization of Sound</source>. <publisher-loc>Cambridge, MA</publisher-loc>: <publisher-name>MIT Press</publisher-name>. doi: <pub-id pub-id-type="doi">10.7551/mitpress/1486.001.0001</pub-id></mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Calma-Roddin</surname> <given-names>N. E.</given-names></name> <name><surname>Drury</surname> <given-names>J. E.</given-names></name></person-group> (<year>2020</year>). <article-title>Music, language, and the N400: ERP interference patterns across cognitive domains</article-title>. <source>Sci. Rep</source>. <volume>10</volume>:<fpage>11222</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-020-66732-0</pub-id><pub-id pub-id-type="pmid">32641708</pub-id></mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>C&#x000E9;spedes-Guevara</surname> <given-names>J.</given-names></name> <name><surname>Eerola</surname> <given-names>T.</given-names></name></person-group> (<year>2018</year>). <article-title>Music communicates affects, not basic emotions&#x02014;a constructionist account of attribution of emotional meanings to music</article-title>. <source>Front. Psychol</source>. <volume>9</volume>:<fpage>215</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2018.00215</pub-id></mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Crowder</surname> <given-names>R. G.</given-names></name> <name><surname>Serafine</surname> <given-names>M. L.</given-names></name> <name><surname>Repp</surname> <given-names>B.</given-names></name></person-group> (<year>1990</year>). <article-title>Physical interaction and association by contiguity in memory for the words and melodies of songs</article-title>. <source>Memory Cogn</source>. <volume>18</volume>, <fpage>469</fpage>&#x02013;<lpage>476</lpage>. doi: <pub-id pub-id-type="doi">10.3758/BF03198480</pub-id><pub-id pub-id-type="pmid">2233260</pub-id></mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cuddy</surname> <given-names>L. L.</given-names></name> <name><surname>Sikka</surname> <given-names>R.</given-names></name> <name><surname>Silveira</surname> <given-names>K.</given-names></name> <name><surname>Bai</surname> <given-names>S.</given-names></name> <name><surname>Vanstone</surname> <given-names>A.</given-names></name></person-group> (<year>2017</year>). <article-title>Music-evoked autobiographical memories (MEAMs) in Alzheimer disease: evidence for a positivity effect</article-title>. <source>Cogent Psychol</source>. <volume>4</volume>:<fpage>1277578</fpage>. doi: <pub-id pub-id-type="doi">10.1080/23311908.2016.1277578</pub-id></mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Desrochers</surname> <given-names>A.</given-names></name> <name><surname>Thompson</surname> <given-names>G. L.</given-names></name></person-group> (<year>2009</year>). <article-title>Subjective frequency and imageability ratings for 3,600 French nouns</article-title>. <source>Behav. Res. Methods</source> <volume>41</volume>, <fpage>546</fpage>&#x02013;<lpage>557</lpage>. doi: <pub-id pub-id-type="doi">10.3758/BRM.41.2.546</pub-id><pub-id pub-id-type="pmid">19363197</pub-id></mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Eerola</surname> <given-names>T.</given-names></name> <name><surname>Vuoskoski</surname> <given-names>J. K.</given-names></name></person-group> (<year>2011</year>). <article-title>A comparison of the discrete and dimensional models of emotion in music</article-title>. <source>Psychol. Music</source> <volume>39</volume>, <fpage>18</fpage>&#x02013;<lpage>49</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0305735610362821</pub-id></mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fernandino</surname> <given-names>L.</given-names></name> <name><surname>Tong</surname> <given-names>J.-Q.</given-names></name> <name><surname>Conant</surname> <given-names>L. L.</given-names></name> <name><surname>Humphries</surname> <given-names>C. J.</given-names></name> <name><surname>Binder</surname> <given-names>J. R.</given-names></name></person-group> (<year>2022</year>). <article-title>Decoding the information structure underlying the neural representation of concepts</article-title>. <source>Proc. Nat. Acad. Sci</source>. <volume>119</volume>:<fpage>e2108091119</fpage>. doi: <pub-id pub-id-type="doi">10.1073/pnas.2108091119</pub-id><pub-id pub-id-type="pmid">35115397</pub-id></mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fiveash</surname> <given-names>A.</given-names></name> <name><surname>Bedoin</surname> <given-names>N.</given-names></name> <name><surname>Gordon</surname> <given-names>R. L.</given-names></name> <name><surname>Tillmann</surname> <given-names>B.</given-names></name></person-group> (<year>2021</year>). <article-title>Processing rhythm in speech and music: shared mechanisms and implications for developmental speech and language disorders</article-title>. <source>Neuropsychology</source> <volume>35</volume>, <fpage>771</fpage>&#x02013;<lpage>791</lpage>. doi: <pub-id pub-id-type="doi">10.1037/neu0000766</pub-id><pub-id pub-id-type="pmid">34435803</pub-id></mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gfeller</surname> <given-names>K.</given-names></name> <name><surname>Coffman</surname> <given-names>D. D.</given-names></name></person-group> (<year>1991</year>). <article-title>An investigation of emotional response of trained musicians to verbal and music information</article-title>. <source>Psychomusicology</source> <volume>10</volume>:<fpage>31</fpage>. doi: <pub-id pub-id-type="doi">10.1037/h0094143</pub-id></mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gordon</surname> <given-names>R. L.</given-names></name> <name><surname>Sch&#x000F6;n</surname> <given-names>D.</given-names></name> <name><surname>Magne</surname> <given-names>C.</given-names></name> <name><surname>Ast&#x000E9;sano</surname> <given-names>C.</given-names></name> <name><surname>Besson</surname> <given-names>M.</given-names></name></person-group> (<year>2010</year>). <article-title>Words and melody are intertwined in perception of sung words: EEG and behavioral evidence</article-title>. <source>PLoS ONE</source> <volume>5</volume>:<fpage>e9889</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0009889</pub-id><pub-id pub-id-type="pmid">20360991</pub-id></mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Grainger</surname> <given-names>J.</given-names></name></person-group> (<year>1990</year>). <article-title>Word frequency and neighborhood frequency effects in lexical decision and naming</article-title>. <source>J. Mem. Lang</source>. <volume>29</volume>, <fpage>228</fpage>&#x02013;<lpage>244</lpage>. doi: <pub-id pub-id-type="doi">10.1016/0749-596X(90)90074-A</pub-id></mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hiebel</surname> <given-names>N.</given-names></name> <name><surname>Ferret</surname> <given-names>O.</given-names></name> <name><surname>Fort</surname> <given-names>K.</given-names></name> <name><surname>N&#x000E9;v&#x000E9;ol</surname> <given-names>A.</given-names></name></person-group> (<year>2022</year>). <article-title>&#x0201C;CLISTER: a corpus for semantic textual similarity in French clinical narratives,&#x0201D;</article-title> in <source>2022 Language Resources and Evaluation Conference, LREC 2022</source>, 4306&#x02013;4315. European Language Resources Association.</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hino</surname> <given-names>Y.</given-names></name> <name><surname>Kusunose</surname> <given-names>Y.</given-names></name> <name><surname>Lupker</surname> <given-names>S. J.</given-names></name> <name><surname>Jared</surname> <given-names>D.</given-names></name></person-group> (<year>2013</year>). <article-title>The processing advantage and disadvantage for homophones in lexical decision tasks</article-title>. <source>J. Exper. Psychol</source>. <volume>39</volume>, <fpage>529</fpage>&#x02013;<lpage>551</lpage>. doi: <pub-id pub-id-type="doi">10.1037/a0029122</pub-id><pub-id pub-id-type="pmid">22905930</pub-id></mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ilie</surname> <given-names>G.</given-names></name> <name><surname>Thompson</surname> <given-names>W. F.</given-names></name></person-group> (<year>2006</year>). <article-title>A comparison of acoustic cues in music and speech for three dimensions of affect</article-title>. <source>Music Percept</source>. <volume>23</volume>, <fpage>319</fpage>&#x02013;<lpage>330</lpage>. doi: <pub-id pub-id-type="doi">10.1525/mp.2006.23.4.319</pub-id></mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Interiano</surname> <given-names>M.</given-names></name> <name><surname>Kazemi</surname> <given-names>K.</given-names></name> <name><surname>Wang</surname> <given-names>L.</given-names></name> <name><surname>Yang</surname> <given-names>J.</given-names></name> <name><surname>Yu</surname> <given-names>Z.</given-names></name> <name><surname>Komarova</surname> <given-names>N. L.</given-names></name></person-group> (<year>2018</year>). <article-title>Musical trends and predictability of success in contemporary songs in and out of the top charts</article-title>. <source>R. Soc. Open Sci</source>. <volume>5</volume>:<fpage>171274</fpage>. doi: <pub-id pub-id-type="doi">10.1098/rsos.171274</pub-id><pub-id pub-id-type="pmid">29892348</pub-id></mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jakubowski</surname> <given-names>K.</given-names></name> <name><surname>Eerola</surname> <given-names>T.</given-names></name> <name><surname>Tillmann</surname> <given-names>B.</given-names></name> <name><surname>Perrin</surname> <given-names>F.</given-names></name> <name><surname>Heine</surname> <given-names>L.</given-names></name></person-group> (<year>2020</year>). <article-title>A cross-sectional study of reminiscence bumps for music-related memories in adulthood</article-title>. <source>Music Sci</source>. <volume>3</volume>, <fpage>1</fpage>&#x02013;<lpage>15</lpage>. doi: <pub-id pub-id-type="doi">10.1177/2059204320965058</pub-id></mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jansen</surname> <given-names>N.</given-names></name> <name><surname>Harding</surname> <given-names>E. E.</given-names></name> <name><surname>Loerts</surname> <given-names>H.</given-names></name> <name><surname>Ba&#x0015F;kent</surname> <given-names>D.</given-names></name> <name><surname>Lowie</surname> <given-names>W.</given-names></name></person-group> (<year>2023</year>). <article-title>The relation between musical abilities and speech prosody perception: a meta-analysis</article-title>. <source>J. Phon</source>. <volume>101</volume>:<fpage>101278</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.wocn.2023.101278</pub-id></mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jiang</surname> <given-names>N.</given-names></name> <name><surname>Wu</surname> <given-names>Z.</given-names></name> <name><surname>Zhou</surname> <given-names>Y.</given-names></name></person-group> (<year>2025</year>). <article-title>Analytic visual word recognition in L2 learners: evidence from the length effect among ESL speakers</article-title>. <source>Appl. Psycholinguist</source>. <volume>46</volume>:<fpage>e33</fpage>. doi: <pub-id pub-id-type="doi">10.1017/S0142716425100209</pub-id></mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Johnson</surname> <given-names>R. B.</given-names></name> <name><surname>Huron</surname> <given-names>D.</given-names></name> <name><surname>Collister</surname> <given-names>L.</given-names></name></person-group> (<year>2013</year>). <article-title>Music and lyrics interactions and their influence on recognition of sung words: an investigation of word frequency, rhyme, metric stress, vocal timbre, melisma, and repetition priming</article-title>. <source>Empir. Musicol. Rev</source>. <volume>9</volume>, <fpage>2</fpage>&#x02013;<lpage>20</lpage>. doi: <pub-id pub-id-type="doi">10.18061/emr.v9i1.3729</pub-id></mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Joulin</surname> <given-names>A.</given-names></name> <name><surname>Grave</surname> <given-names>E.</given-names></name> <name><surname>Bojanowski</surname> <given-names>P.</given-names></name> <name><surname>Mikolov</surname> <given-names>T.</given-names></name></person-group> (<year>2017</year>). <article-title>&#x0201C;Bag of tricks for efficient text classification,&#x0201D;</article-title> in <source>Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics (EACL)</source>, 427&#x02013;431. doi: <pub-id pub-id-type="doi">10.18653/v1/E17-2068</pub-id></mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Koelsch</surname> <given-names>S.</given-names></name></person-group> (<year>2011</year>). <article-title>Towards a neural basis of processing musical semantics</article-title>. <source>Phys. Life Rev</source>. <volume>8</volume>, <fpage>89</fpage>&#x02013;<lpage>105</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.plrev.2011.04.004</pub-id><pub-id pub-id-type="pmid">21601541</pub-id></mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Koelsch</surname> <given-names>S.</given-names></name> <name><surname>Kasper</surname> <given-names>E.</given-names></name> <name><surname>Sammler</surname> <given-names>D.</given-names></name> <name><surname>Schulze</surname> <given-names>K.</given-names></name> <name><surname>Gunter</surname> <given-names>T.</given-names></name> <name><surname>Friederici</surname> <given-names>A. D.</given-names></name></person-group> (<year>2004</year>). <article-title>Music, language and meaning: brain signatures of semantic processing</article-title>. <source>Nat. Neurosci</source>. <volume>7</volume>, <fpage>302</fpage>&#x02013;<lpage>307</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nn1197</pub-id><pub-id pub-id-type="pmid">14983184</pub-id></mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kousta</surname> <given-names>S.-T.</given-names></name> <name><surname>Vinson</surname> <given-names>D. P.</given-names></name> <name><surname>Vigliocco</surname> <given-names>G.</given-names></name></person-group> (<year>2009</year>). <article-title>Emotion words, regardless of polarity, have a processing advantage over neutral words</article-title>. <source>Cognition</source> <volume>112</volume>, <fpage>473</fpage>&#x02013;<lpage>481</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cognition.2009.06.007</pub-id><pub-id pub-id-type="pmid">19591976</pub-id></mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Krumhansl</surname> <given-names>C. L.</given-names></name> <name><surname>Zupnick</surname> <given-names>J. A.</given-names></name></person-group> (<year>2013</year>). <article-title>Cascading reminiscence bumps in popular music</article-title>. <source>Psychol. Sci</source>. <volume>24</volume>, <fpage>2057</fpage>&#x02013;<lpage>2068</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0956797613486486</pub-id><pub-id pub-id-type="pmid">24006129</pub-id></mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>K&#x000FC;ssner</surname> <given-names>M. B.</given-names></name> <name><surname>Eerola</surname> <given-names>T.</given-names></name></person-group> (<year>2019</year>). <article-title>The content and functions of vivid and soothing visual imagery during music listening: findings from a survey study</article-title>. <source>Psychomusicology</source> <volume>29</volume>, <fpage>90</fpage>&#x02013;<lpage>99</lpage>. doi: <pub-id pub-id-type="doi">10.1037/pmu0000238</pub-id></mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>LaCroix</surname> <given-names>A. N.</given-names></name> <name><surname>Diaz</surname> <given-names>A. F.</given-names></name> <name><surname>Rogalsky</surname> <given-names>C.</given-names></name></person-group> (<year>2015</year>). <article-title>The relationship between the neural computations for speech and music perception is context-dependent: an activation likelihood estimate study</article-title>. <source>Front. Psychol</source>. <volume>6</volume>:<fpage>1138</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2015.01138</pub-id><pub-id pub-id-type="pmid">26321976</pub-id></mixed-citation>
</ref>
<ref id="B41">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Long</surname> <given-names>J. A.</given-names></name></person-group> (<year>2024</year>). <source>Interactions: Comprehensive, user-friendly toolkit for probing interactions</source>. <publisher-loc>CRAN</publisher-loc>: <publisher-name>Contributed Packages</publisher-name>.</mixed-citation>
</ref>
<ref id="B42">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Margulis</surname> <given-names>E. H.</given-names></name> <name><surname>Wong</surname> <given-names>P. C. M.</given-names></name> <name><surname>Turnbull</surname> <given-names>C.</given-names></name> <name><surname>Kubit</surname> <given-names>B. M.</given-names></name> <name><surname>McAuley</surname> <given-names>J. D.</given-names></name></person-group> (<year>2022</year>). <article-title>Narratives imagined in response to instrumental music reveal culture-bounded intersubjectivity</article-title>. <source>Proc. Nat. Acad. Sci</source>. <volume>119</volume>:<fpage>e2110406119</fpage>. doi: <pub-id pub-id-type="doi">10.1073/pnas.2110406119</pub-id><pub-id pub-id-type="pmid">35064081</pub-id></mixed-citation>
</ref>
<ref id="B43">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mehr</surname> <given-names>S. A.</given-names></name> <name><surname>Singh</surname> <given-names>M.</given-names></name> <name><surname>Knox</surname> <given-names>D.</given-names></name> <name><surname>Ketter</surname> <given-names>D. M.</given-names></name> <name><surname>Pickens-Jones</surname> <given-names>D.</given-names></name> <name><surname>Atwood</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Universality and diversity in human song</article-title>. <source>Science</source> <volume>366</volume>:<fpage>eaax0868</fpage>. doi: <pub-id pub-id-type="doi">10.1126/science.aax0868</pub-id><pub-id pub-id-type="pmid">31753969</pub-id></mixed-citation>
</ref>
<ref id="B44">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Miranda</surname> <given-names>R. A.</given-names></name> <name><surname>Ullman</surname> <given-names>M. T.</given-names></name></person-group> (<year>2007</year>). <article-title>Double dissociation between rules and memory in music: an event-related potential study</article-title>. <source>Neuroimage</source> <volume>38</volume>, <fpage>331</fpage>&#x02013;<lpage>345</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2007.07.034</pub-id><pub-id pub-id-type="pmid">17855126</pub-id></mixed-citation>
</ref>
<ref id="B45">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mori</surname> <given-names>K.</given-names></name> <name><surname>Iwanaga</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Pleasure generated by sadness: effect of sad lyrics on the emotions induced by happy music</article-title>. <source>Psychol. Music</source> <volume>42</volume>, <fpage>643</fpage>&#x02013;<lpage>652</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0305735613483667</pub-id></mixed-citation>
</ref>
<ref id="B46">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nayak</surname> <given-names>S.</given-names></name> <name><surname>Coleman</surname> <given-names>P. L.</given-names></name> <name><surname>Lad&#x000E1;nyi</surname> <given-names>E.</given-names></name> <name><surname>Nitin</surname> <given-names>R.</given-names></name> <name><surname>Gustavson</surname> <given-names>D. E.</given-names></name> <name><surname>Fisher</surname> <given-names>S. E.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>The musical abilities, pleiotropy, language, and environment (MAPLE) framework for understanding musicality&#x02013;language links across the lifespan</article-title>. <source>Neurobiol. Lang</source>. <volume>3</volume>, <fpage>615</fpage>&#x02013;<lpage>664</lpage>. doi: <pub-id pub-id-type="doi">10.1162/nol_a_00079</pub-id><pub-id pub-id-type="pmid">36742012</pub-id></mixed-citation>
</ref>
<ref id="B47">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>New</surname> <given-names>B.</given-names></name> <name><surname>Pallier</surname> <given-names>C.</given-names></name> <name><surname>Brysbaert</surname> <given-names>M.</given-names></name> <name><surname>Ferrand</surname> <given-names>L.</given-names></name></person-group> (<year>2004</year>). <article-title>Lexique 2: a new French lexical database</article-title>. <source>Behav. Res. Methods, Instr. Comput</source>. <volume>36</volume>, <fpage>516</fpage>&#x02013;<lpage>524</lpage>. doi: <pub-id pub-id-type="doi">10.3758/BF03195598</pub-id><pub-id pub-id-type="pmid">15641440</pub-id></mixed-citation>
</ref>
<ref id="B48">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Norman-Haignere</surname> <given-names>S. V.</given-names></name> <name><surname>Feather</surname> <given-names>J. H.</given-names></name> <name><surname>Boebinger</surname> <given-names>D.</given-names></name> <name><surname>Brunner</surname> <given-names>P.</given-names></name> <name><surname>Ritaccio</surname> <given-names>A.</given-names></name> <name><surname>McDermott</surname> <given-names>J. H.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>A neural population selective for song in human auditory cortex</article-title>. <source>Curr. Biol</source>. <volume>32</volume>, <fpage>1470</fpage>&#x02013;<lpage>1484</lpage>.e12. doi: <pub-id pub-id-type="doi">10.1016/j.cub.2022.01.069</pub-id><pub-id pub-id-type="pmid">35196507</pub-id></mixed-citation>
</ref>
<ref id="B49">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Omigie</surname> <given-names>D.</given-names></name></person-group> (<year>2015</year>). <article-title>Music and literature: are there shared empathy and predictive mechanisms underlying their affective impact?</article-title> <source>Front. Psychol</source>. <volume>6</volume>:<fpage>1250</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2015.01250</pub-id><pub-id pub-id-type="pmid">26379583</pub-id></mixed-citation>
</ref>
<ref id="B50">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Parks</surname> <given-names>S. L.</given-names></name> <name><surname>Clancy Dollinger</surname> <given-names>S. M.</given-names></name></person-group> (<year>2014</year>). <article-title>The positivity effect and auditory recognition memory for musical excerpts in young, middle-aged, and older adults</article-title>. <source>Psychomusicology</source> <volume>24</volume>, <fpage>298</fpage>&#x02013;<lpage>308</lpage>. doi: <pub-id pub-id-type="doi">10.1037/pmu0000079</pub-id></mixed-citation>
</ref>
<ref id="B51">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Peretz</surname> <given-names>I.</given-names></name> <name><surname>Radeau</surname> <given-names>M.</given-names></name> <name><surname>Arguin</surname> <given-names>M.</given-names></name></person-group> (<year>2004</year>). <article-title>Two-way interactions between music and language: evidence from priming recognition of tune and lyrics in familiar songs</article-title>. <source>Memory Cogn</source>. <volume>32</volume>, <fpage>142</fpage>&#x02013;<lpage>152</lpage>. doi: <pub-id pub-id-type="doi">10.3758/BF03195827</pub-id><pub-id pub-id-type="pmid">15078051</pub-id></mixed-citation>
</ref>
<ref id="B52">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Peynircio&#x0011F;lu</surname> <given-names>Z. F.</given-names></name> <name><surname>Tekcan</surname> <given-names>A. I.</given-names></name> <name><surname>Wagner</surname> <given-names>J. L.</given-names></name> <name><surname>Baxter</surname> <given-names>T. L.</given-names></name> <name><surname>Shaffer</surname> <given-names>S. D.</given-names></name></person-group> (<year>1998</year>). <article-title>Name or hum that tune: feeling of knowing for music</article-title>. <source>Memory Cogn</source>. <volume>26</volume>, <fpage>1131</fpage>&#x02013;<lpage>1137</lpage>. doi: <pub-id pub-id-type="doi">10.3758/BF03201190</pub-id><pub-id pub-id-type="pmid">9847541</pub-id></mixed-citation>
</ref>
<ref id="B53">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Proverbio</surname> <given-names>A. M.</given-names></name> <name><surname>Piotti</surname> <given-names>E.</given-names></name></person-group> (<year>2022</year>). <article-title>Common neural bases for processing speech prosody and music: an integrated model</article-title>. <source>Psychol. Music</source> <volume>50</volume>, <fpage>1408</fpage>&#x02013;<lpage>1423</lpage>. doi: <pub-id pub-id-type="doi">10.1177/03057356211050117</pub-id></mixed-citation>
</ref>
<ref id="B54">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Proverbio</surname> <given-names>A. M.</given-names></name> <name><surname>Spirito</surname> <given-names>D.</given-names></name> <name><surname>Benedetto</surname> <given-names>F. D.</given-names></name></person-group> (<year>2022</year>). <source>The interactive effects of conjoined music and visual art processing</source>. OSF Preprints. Preprint. doi: <pub-id pub-id-type="doi">10.31234/osf.io/6pw2y</pub-id></mixed-citation>
</ref>
<ref id="B55">
<mixed-citation publication-type="web"><collab>R Core Team</collab> (<year>2023</year>). <source>R: A Language and Environment for Statistical Computing</source>. R Foundation for Statistical Computing. Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.R-project.org/">https://www.R-project.org/</ext-link></mixed-citation>
</ref>
<ref id="B56">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rogalsky</surname> <given-names>C.</given-names></name> <name><surname>Rong</surname> <given-names>F.</given-names></name> <name><surname>Saberi</surname> <given-names>K.</given-names></name> <name><surname>Hickok</surname> <given-names>G.</given-names></name></person-group> (<year>2011</year>). <article-title>Functional anatomy of language and music perception: temporal and structural factors investigated using functional magnetic resonance imaging</article-title>. <source>J. Neurosci</source>. <volume>31</volume>, <fpage>3843</fpage>&#x02013;<lpage>3852</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.4515-10.2011</pub-id><pub-id pub-id-type="pmid">21389239</pub-id></mixed-citation>
</ref>
<ref id="B57">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rossi</surname> <given-names>S.</given-names></name> <name><surname>Gugler</surname> <given-names>M. F.</given-names></name> <name><surname>Rungger</surname> <given-names>M.</given-names></name> <name><surname>Galvan</surname> <given-names>O.</given-names></name> <name><surname>Zorowka</surname> <given-names>P. G.</given-names></name> <name><surname>Seebacher</surname> <given-names>J.</given-names></name></person-group> (<year>2020</year>). <article-title>How the brain understands spoken and sung sentences</article-title>. <source>Brain Sci</source>. <volume>10</volume>:<fpage>36</fpage>. doi: <pub-id pub-id-type="doi">10.3390/brainsci10010036</pub-id><pub-id pub-id-type="pmid">31936356</pub-id></mixed-citation>
</ref>
<ref id="B58">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sammler</surname> <given-names>D.</given-names></name> <name><surname>Baird</surname> <given-names>A.</given-names></name> <name><surname>Valabr&#x000E9;gue</surname> <given-names>R.</given-names></name> <name><surname>Cl&#x000E9;ment</surname> <given-names>S.</given-names></name> <name><surname>Dupont</surname> <given-names>S.</given-names></name> <name><surname>Belin</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2010</year>). <article-title>The relationship of lyrics and tunes in the processing of unfamiliar songs: a functional magnetic resonance adaptation study</article-title>. <source>J. Neurosci</source>. <volume>30</volume>, <fpage>3572</fpage>&#x02013;<lpage>3578</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.2751-09.2010</pub-id><pub-id pub-id-type="pmid">20219991</pub-id></mixed-citation>
</ref>
<ref id="B59">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Samson</surname> <given-names>S.</given-names></name> <name><surname>Zatorre</surname> <given-names>R. J.</given-names></name></person-group> (<year>1991</year>). <article-title>Recognition memory for text and melody of songs after unilateral temporal lobe lesion: evidence for dual encoding</article-title>. <source>J. Exper. Psychol</source>. <volume>17</volume>, <fpage>793</fpage>&#x02013;<lpage>804</lpage>. doi: <pub-id pub-id-type="doi">10.1037/0278-7393.17.4.793</pub-id><pub-id pub-id-type="pmid">1832437</pub-id></mixed-citation>
</ref>
<ref id="B60">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schulze</surname> <given-names>K.</given-names></name> <name><surname>Zysset</surname> <given-names>S.</given-names></name> <name><surname>Mueller</surname> <given-names>K.</given-names></name> <name><surname>Friederici</surname> <given-names>A. D.</given-names></name> <name><surname>Koelsch</surname> <given-names>S.</given-names></name></person-group> (<year>2011</year>). <article-title>Neuroarchitecture of verbal and tonal working memory in nonmusicians and musicians</article-title>. <source>Hum. Brain Mapp</source>. <volume>32</volume>, <fpage>771</fpage>&#x02013;<lpage>783</lpage>. doi: <pub-id pub-id-type="doi">10.1002/hbm.21060</pub-id><pub-id pub-id-type="pmid">20533560</pub-id></mixed-citation>
</ref>
<ref id="B61">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Serafine</surname> <given-names>M. L.</given-names></name> <name><surname>Crowder</surname> <given-names>R. G.</given-names></name> <name><surname>Repp</surname> <given-names>B. H.</given-names></name></person-group> (<year>1984</year>). <article-title>Integration of melody and text in memory for songs</article-title>. <source>Cognition</source> <volume>16</volume>, <fpage>285</fpage>&#x02013;<lpage>303</lpage>. doi: <pub-id pub-id-type="doi">10.1016/0010-0277(84)90031-3</pub-id><pub-id pub-id-type="pmid">6541107</pub-id></mixed-citation>
</ref>
<ref id="B62">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sousou</surname> <given-names>S. D.</given-names></name></person-group> (<year>1997</year>). <article-title>Effects of melody and lyrics on mood and memory</article-title>. <source>Percept. Mot. Skills</source> <volume>85</volume>, <fpage>31</fpage>&#x02013;<lpage>40</lpage>. doi: <pub-id pub-id-type="doi">10.2466/pms.1997.85.1.31</pub-id><pub-id pub-id-type="pmid">9293553</pub-id></mixed-citation>
</ref>
<ref id="B63">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Steinbeis</surname> <given-names>N.</given-names></name> <name><surname>Koelsch</surname> <given-names>S.</given-names></name></person-group> (<year>2011</year>). <article-title>Affective priming effects of musical sounds on the processing of word meaning</article-title>. <source>J. Cogn. Neurosci</source>. <volume>23</volume>, <fpage>604</fpage>&#x02013;<lpage>621</lpage>. doi: <pub-id pub-id-type="doi">10.1162/jocn.2009.21383</pub-id><pub-id pub-id-type="pmid">19925192</pub-id></mixed-citation>
</ref>
<ref id="B64">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Stenneken</surname> <given-names>P.</given-names></name> <name><surname>Conrad</surname> <given-names>M.</given-names></name> <name><surname>Jacobs</surname> <given-names>A. M.</given-names></name></person-group> (<year>2007</year>). <article-title>Processing of syllables in production and recognition tasks</article-title>. <source>J. Psycholinguist. Res</source>. <volume>36</volume>, <fpage>65</fpage>&#x02013;<lpage>78</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10936-006-9033-8</pub-id><pub-id pub-id-type="pmid">17260187</pub-id></mixed-citation>
</ref>
<ref id="B65">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Stratton</surname> <given-names>V. N.</given-names></name> <name><surname>Zalanowski</surname> <given-names>A. H.</given-names></name></person-group> (<year>1994</year>). <article-title>Affective impact of music vs. lyrics</article-title>. <source>Empir. Stud. Arts</source> <volume>12</volume>, <fpage>173</fpage>&#x02013;<lpage>184</lpage>. doi: <pub-id pub-id-type="doi">10.2190/35T0-U4DT-N09Q-LQHW</pub-id></mixed-citation>
</ref>
<ref id="B66">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Syssau</surname> <given-names>A.</given-names></name> <name><surname>Font</surname> <given-names>N.</given-names></name></person-group> (<year>2005</year>). <article-title>&#x000C9;valuations des caract&#x000E9;ristiques &#x000E9;motionnelles d&#x00027;un corpus de 604 mots</article-title>. <source>Bull. Psychol</source>. <volume>477</volume>, <fpage>361</fpage>&#x02013;<lpage>367</lpage>. doi: <pub-id pub-id-type="doi">10.3917/bupsy.477.0361</pub-id></mixed-citation>
</ref>
<ref id="B67">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Syssau</surname> <given-names>A.</given-names></name> <name><surname>Lax&#x000E9;n</surname> <given-names>J.</given-names></name></person-group> (<year>2012</year>). <article-title>L&#x00027;influence de la richesse s&#x000E9;mantique dans la reconnaissance visuelle des mots &#x000E9;motionnels</article-title>. <source>Can. J. Exp. Psychol</source>. <volume>66</volume>, <fpage>70</fpage>&#x02013;<lpage>78</lpage>. doi: <pub-id pub-id-type="doi">10.1037/a0027083</pub-id></mixed-citation>
</ref>
<ref id="B68">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tallal</surname> <given-names>P.</given-names></name> <name><surname>Gaab</surname> <given-names>N.</given-names></name></person-group> (<year>2006</year>). <article-title>Dynamic auditory processing, musical experience and language development</article-title>. <source>Trends Neurosci</source>. <volume>29</volume>, <fpage>382</fpage>&#x02013;<lpage>390</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.tins.2006.06.003</pub-id><pub-id pub-id-type="pmid">16806512</pub-id></mixed-citation>
</ref>
<ref id="B69">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Temperley</surname> <given-names>D.</given-names></name></person-group> (<year>2022</year>). <article-title>Music and language</article-title>. <source>Annu. Rev. Linguist</source>. <volume>8</volume>, <fpage>153</fpage>&#x02013;<lpage>170</lpage>. doi: <pub-id pub-id-type="doi">10.1146/annurev-linguistics-031220-121126</pub-id></mixed-citation>
</ref>
<ref id="B70">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Vidas</surname> <given-names>D.</given-names></name> <name><surname>Calligeros</surname> <given-names>R.</given-names></name> <name><surname>Nelson</surname> <given-names>N. L.</given-names></name> <name><surname>Dingle</surname> <given-names>G. A.</given-names></name></person-group> (<year>2020</year>). <article-title>Development of emotion recognition in popular music and vocal bursts</article-title>. <source>Cogn. Emot</source>. <volume>34</volume>, <fpage>906</fpage>&#x02013;<lpage>919</lpage>. doi: <pub-id pub-id-type="doi">10.1080/02699931.2019.1700482</pub-id><pub-id pub-id-type="pmid">31805815</pub-id></mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2032274/overview">Rakesh Balabantaray</ext-link>, International Institute of Information Technology, India</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3302523/overview">Ashutosh Bhoi</ext-link>, Gandhi Institute of Technology and Management (GITAM), India</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3304960/overview">Mostafa Boieblan</ext-link>, Polytechnic University of Madrid, Spain</p>
</fn>
</fn-group>
</back>
</article>