<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Hum. Neurosci.</journal-id>
<journal-title>Frontiers in Human Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Hum. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-5161</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnhum.2025.1472689</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Human Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>EEG microstates dynamics of happiness and sadness during music listening</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Gupta</surname> <given-names>Ashish</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Srivastava</surname> <given-names>Chandan Kumar</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Bhushan</surname> <given-names>Braj</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/721066/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Behera</surname> <given-names>Laxmidhar</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2786624/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Department of Electrical Engineering, Indian Institute of Technology</institution>, <addr-line>Kanpur</addr-line>, <country>India</country></aff>
<aff id="aff2"><sup>2</sup><institution>Department of Humanities and Social Sciences, Indian Institute of Technology</institution>, <addr-line>Bombay</addr-line>, <country>India</country></aff>
<aff id="aff3"><sup>3</sup><institution>Department of Humanities and Social Sciences, Indian Institute of Technology</institution>, <addr-line>Kanpur</addr-line>, <country>India</country></aff>
<aff id="aff4"><sup>4</sup><institution>School of Computing and Electrical Engineering, Indian Institute of Technology</institution>, <addr-line>Mandi</addr-line>, <country>India</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Simone Di Plinio, University of Studies G. d&#x00027;Annunzio Chieti and Pescara, Italy</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Divesh Thaploo, National Institutes of Health (NIH), United States</p>
<p>Bianca Maria Serena Inguscio, Sapienza University of Rome, Italy</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Laxmidhar Behera <email>lbehera&#x00040;iitk.ac.in</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>18</day>
<month>06</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>19</volume>
<elocation-id>1472689</elocation-id>
<history>
<date date-type="received">
<day>29</day>
<month>07</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>14</day>
<month>05</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2025 Gupta, Srivastava, Bhushan and Behera.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Gupta, Srivastava, Bhushan and Behera</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract>
<p>The human brain naturally responds to music, with happy music enhancing attention and sad music aiding emotion regulation. However, the specific electroencephalogram (EEG) microstates linked to these cognitive and emotional effects remain unclear. This study investigated the microstates associated with happiness and sadness, focusing on the alpha band, using classical music as stimuli. Results revealed a significant increase in class D microstate, associated with attention, during happy music listening. An inverse relationship between class C (linked to mind-wandering) and class D microstates was observed. Analysis of global explained variance (GEV) and global field potential (GFP) indicated that happy music upregulated class D and downregulated class C microstates compared to baseline. In contrast, sad music elicited an increased presence of class B, class C, and class D microstates, with GEV and GFP analyses showing upregulation of class C and class D compared to the resting state. These findings suggest distinct cognitive effects: (1) an increase in class D and reduction in class C microstates explain enhanced attention during happy music listening, and (2) the concurrent upregulation of class C and class D microstates underpins enhanced emotion regulation and self-regulatory goals observed upon sad music listening. Notably, compared to baseline, the mean microstate duration was significantly longer for both happy (<italic>p</italic> = 0.018) and sad (<italic>p</italic> = 0.0003) music, indicating that music listening enhances the temporal stability of active microstates. These findings advance the understanding of the neural mechanisms underpinning music&#x00027;s cognitive and emotional effects, providing a framework to explore music-induced changes in brain dynamics and their implications for emotion regulation and attentional modulation.</p></abstract>
<kwd-group>
<kwd>EEG microstate</kwd>
<kwd>emotion</kwd>
<kwd>music</kwd>
<kwd>attention</kwd>
<kwd>mind-wandering</kwd>
</kwd-group>
<counts>
<fig-count count="11"/>
<table-count count="0"/>
<equation-count count="1"/>
<ref-count count="79"/>
<page-count count="17"/>
<word-count count="11639"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Cognitive Neuroscience</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>1 Introduction</title>
<p>Music possesses a unique ability to influence various musical as well as non-music domains, including intelligence (Rauscher et al., <xref ref-type="bibr" rid="B48">1993</xref>; Rideout and Laubach, <xref ref-type="bibr" rid="B50">1996</xref>), attention (Putkinen et al., <xref ref-type="bibr" rid="B47">2017</xref>; Markovic et al., <xref ref-type="bibr" rid="B36">2017</xref>; J&#x000E4;ncke et al., <xref ref-type="bibr" rid="B26">2015</xref>), emotion (Van den Tol et al., <xref ref-type="bibr" rid="B69">2016</xref>), and the Default Mode Network (DMN) (Trost et al., <xref ref-type="bibr" rid="B67">2012</xref>; Wilkins et al., <xref ref-type="bibr" rid="B74">2014</xref>). The DMN is a neural system primarily associated with internally focused cognitive processes, including daydreaming, mind-wandering, self-referential thinking, reminiscing about the past, and planning for the future (Yeshurun et al., <xref ref-type="bibr" rid="B76">2021</xref>). The broad impact of music has led to positive effects on cognitive, motor, emotional, and social functioning in both healthy individuals and those with aging or neurological conditions (S&#x000E4;rk&#x000E4;m&#x000F6;, <xref ref-type="bibr" rid="B52">2018</xref>). Cognitive functions such as attention and emotion regulation are essential processes for normal human functioning, and music has been used as a supplementary tool to enhance these functions (Rauscher et al., <xref ref-type="bibr" rid="B48">1993</xref>; Rideout and Laubach, <xref ref-type="bibr" rid="B50">1996</xref>; Putkinen et al., <xref ref-type="bibr" rid="B47">2017</xref>; Markovic et al., <xref ref-type="bibr" rid="B36">2017</xref>; J&#x000E4;ncke et al., <xref ref-type="bibr" rid="B26">2015</xref>).</p>
<p>Studies have shown that even brief exposure to music can enhance the spatiotemporal performance (Rauscher et al., <xref ref-type="bibr" rid="B49">1995</xref>; Wilson and Brown, <xref ref-type="bibr" rid="B75">1997</xref>; Rauscher et al., <xref ref-type="bibr" rid="B48">1993</xref>; Rideout and Laubach, <xref ref-type="bibr" rid="B50">1996</xref>) of individuals, often referred to as the Mozart effect. Furthermore, research suggests that musical stimuli (Mammarella et al., <xref ref-type="bibr" rid="B35">2007</xref>) capable of inducing a moderate arousal and pleasant mood in individuals can lead to significant improvements in several aspects of cognitive performance (Schellenberg and Hallam, <xref ref-type="bibr" rid="B53">2005</xref>; Schellenberg et al., <xref ref-type="bibr" rid="B54">2007</xref>). Pleasant music stimulates brain regions associated with memory, attention, cognition, and IQ (Verrusio et al., <xref ref-type="bibr" rid="B70">2015</xref>). Recent research (Gupta et al., <xref ref-type="bibr" rid="B19">2018</xref>) suggests that music affects the cognitive system, enhancing brain efficiency through three distinct mechanisms. First, it activates specific regions of the brain in the prefrontal and occipital lobes, which are responsible for IQ and attention. Additionally, music reduces unwanted brain activities, effectively minimizing interference and optimizing cognitive processes.</p>
<p>People generally gravitate toward happy music (Van den Tol et al., <xref ref-type="bibr" rid="B69">2016</xref>) and strive to avoid sadness in their lives. However, paradoxically, they exhibit a strong inclination toward sad music (Taruffi and Koelsch, <xref ref-type="bibr" rid="B63">2014</xref>), particularly during adverse moments, ranging from everyday struggles to relationship difficulties and profound experiences such as the loss of a loved one (Hanser et al., <xref ref-type="bibr" rid="B23">2016</xref>). Research has provided evidence that sad music can evoke a pleasurable experience characterized by a sense of solace (Van den Tol et al., <xref ref-type="bibr" rid="B69">2016</xref>) and profound beauty (Sachs et al., <xref ref-type="bibr" rid="B51">2015</xref>). This feeling is different from real-life sadness (Gupta et al., <xref ref-type="bibr" rid="B20">2023</xref>; Taruffi and Koelsch, <xref ref-type="bibr" rid="B63">2014</xref>; Sachs et al., <xref ref-type="bibr" rid="B51">2015</xref>). The positive effects of listening to sad music on managing difficult circumstances have been extensively studied and well-documented (Van den Tol et al., <xref ref-type="bibr" rid="B69">2016</xref>; Van den Tol and Edwards, <xref ref-type="bibr" rid="B68">2013</xref>; Hanser et al., <xref ref-type="bibr" rid="B23">2016</xref>).</p>
<p>Sad music is frequently sought after by healthy adolescents and young adults as a means of seeking solace (Van den Tol et al., <xref ref-type="bibr" rid="B69">2016</xref>), consolation (Ter Bogt et al., <xref ref-type="bibr" rid="B65">2017</xref>), comfort (Taruffi and Koelsch, <xref ref-type="bibr" rid="B63">2014</xref>), and coping with their emotions (Van den Tol et al., <xref ref-type="bibr" rid="B69">2016</xref>). Listening to sad music during challenging situations consistently serves various self-regulation goals in the cognitive, social, memory retrieval, distraction, mood enhancement, and affect re-experience domains (Van den Tol et al., <xref ref-type="bibr" rid="B69">2016</xref>; Van den Tol and Edwards, <xref ref-type="bibr" rid="B68">2013</xref>). Furthermore, a recent study (Gupta et al., <xref ref-type="bibr" rid="B20">2023</xref>) has shown that listening to sad music after recalling a personal sad event is associated with improved emotion and memory processing, as well as improved alertness. Findings suggest that sad music can have a profound impact on our emotional and cognitive experiences, facilitating the processing and regulation of emotions in challenging situations.</p>
<p>A comparative study (Taruffi et al., <xref ref-type="bibr" rid="B64">2017</xref>) found that happy music had a notable positive impact on meta-awareness, while sad music exhibited a considerable rise in mind-wandering when contrasted with happy music. This is further supported by the heightened centrality observed within the core nodes of the DMN during sad music listening compared to happy music (Taruffi et al., <xref ref-type="bibr" rid="B64">2017</xref>). The DMN has been recognized as the key network associated with mind-wandering (Mason et al., <xref ref-type="bibr" rid="B37">2007</xref>; Kucyi et al., <xref ref-type="bibr" rid="B33">2013</xref>; Andrews-Hanna et al., <xref ref-type="bibr" rid="B2">2010a</xref>,<xref ref-type="bibr" rid="B3">b</xref>; Christoff et al., <xref ref-type="bibr" rid="B10">2009</xref>). Numerous other studies have consistently linked the DMN activity to music listening (Trost et al., <xref ref-type="bibr" rid="B67">2012</xref>; Wilkins et al., <xref ref-type="bibr" rid="B74">2014</xref>; Janata, <xref ref-type="bibr" rid="B25">2009</xref>; Ford et al., <xref ref-type="bibr" rid="B16">2011</xref>; Brattico et al., <xref ref-type="bibr" rid="B5">2011</xref>). It should be noted that the experience of wandering of the mind while listening to sad music is different from ordinary wandering of the mind and is characterized by a unique blend of melancholy and pleasure associated with sad music (Gupta et al., <xref ref-type="bibr" rid="B20">2023</xref>; Taruffi and Koelsch, <xref ref-type="bibr" rid="B63">2014</xref>; Sachs et al., <xref ref-type="bibr" rid="B51">2015</xref>) and comprises spontaneous, self-referential thoughts, emotions, and cognitive processes (Taruffi et al., <xref ref-type="bibr" rid="B64">2017</xref>).</p>
<p>However, the field of music research is confronted with several obstacles, including the lack of a consistent scientific method for delivering musical interventions, the tendency to reduce its effects to surface-level emotional or esthetic experiences, and an incomplete understanding of how the brain functions while engaging with music. To address these issues, detailed and comprehensive studies are essential to reveal the deeper impact of music on cognitive abilities such as attention and emotion regulation. This line of research has the potential to reshape strategies in mental healthcare, educational methodologies, and cognitive therapy, paving the way for innovative and non-intrusive tools to enhance quality of life.</p>
<p>The application of EEG microstates, which represent distinct and non-overlapping topographies (Khanna et al., <xref ref-type="bibr" rid="B29">2015</xref>; Koenig et al., <xref ref-type="bibr" rid="B32">2002</xref>) in recorded electrical signals, has become increasingly popular in the field of electrical neuroimaging. EEG microstates, representing brief instances of coordinated electrical activity in the brain enduring tens of milliseconds, are considered quasi-stable functional states (Michel and Koenig, <xref ref-type="bibr" rid="B38">2018</xref>). One notable advantage of the microstate method is the reliability and comparability of the topographies obtained across different studies (Khanna et al., <xref ref-type="bibr" rid="B29">2015</xref>; Michel and Koenig, <xref ref-type="bibr" rid="B38">2018</xref>), regardless of the number of electrodes used (Zhang et al., <xref ref-type="bibr" rid="B78">2021</xref>), instructions given to participants (such as open or closed eyes) (Zanesco et al., <xref ref-type="bibr" rid="B77">2021</xref>), or the frequency range analyzed (F&#x000E9;rat et al., <xref ref-type="bibr" rid="B15">2022</xref>). Importantly, these microstates have demonstrated the potential to function as biomarkers (Schiller et al., <xref ref-type="bibr" rid="B55">2020</xref>) for neuropsychiatric disorders (Soni et al., <xref ref-type="bibr" rid="B60">2019</xref>; Michel and Koenig, <xref ref-type="bibr" rid="B38">2018</xref>), including mood and anxiety disorders, as well as Alzheimer&#x00027;s disease (Al Zoubi et al., <xref ref-type="bibr" rid="B1">2019</xref>; Tait et al., <xref ref-type="bibr" rid="B61">2020</xref>). 
Recently, it has been applied across a diverse array of studies, encompassing brain resting states (Schiller et al., <xref ref-type="bibr" rid="B55">2020</xref>), neuropsychiatric disorders (Nishida et al., <xref ref-type="bibr" rid="B42">2013</xref>; Soni et al., <xref ref-type="bibr" rid="B60">2019</xref>; Terpou et al., <xref ref-type="bibr" rid="B66">2022</xref>), sleepiness (Cantero et al., <xref ref-type="bibr" rid="B7">1999</xref>), and task-based brain activities (Seitzman et al., <xref ref-type="bibr" rid="B56">2017</xref>; Hu et al., <xref ref-type="bibr" rid="B24">2023</xref>; Gu et al., <xref ref-type="bibr" rid="B18">2022</xref>; Jiang et al., <xref ref-type="bibr" rid="B27">2024</xref>).</p>
<p>Research has consistently identified specific spatiotemporal brain microstates in independent studies, commonly categorized into four distinct classes, A, B, C, and D, based on their unique topological orientations. Map A is characterized by a left-right orientation, Map B by a right-left orientation, Map C by an anterior-posterior orientation, and Map D by a fronto-central maximum. This labeling convention has been widely adopted in various studies (Michel and Koenig, <xref ref-type="bibr" rid="B38">2018</xref>; Hu et al., <xref ref-type="bibr" rid="B24">2023</xref>; Pal et al., <xref ref-type="bibr" rid="B43">2021</xref>; Liu et al., <xref ref-type="bibr" rid="B34">2021</xref>; Pascual-Marqui et al., <xref ref-type="bibr" rid="B44">2014</xref>). Each microstate is associated with specific functions, namely auditory information processing, visual information processing, DMN, and attention (Khanna et al., <xref ref-type="bibr" rid="B29">2015</xref>; Michel and Koenig, <xref ref-type="bibr" rid="B38">2018</xref>; Koenig et al., <xref ref-type="bibr" rid="B32">2002</xref>). A recent review (Tarailis et al., <xref ref-type="bibr" rid="B62">2023</xref>) on the functionality of EEG microstates has additionally associated class A with arousal. The author finds that in addition to visual processing by class B microstate, it plays a key role in scene visualization and self-representation within those scenes (Br&#x000E9;chet et al., <xref ref-type="bibr" rid="B6">2019</xref>). It is frequently observed during tasks involving autobiographical memory (Br&#x000E9;chet et al., <xref ref-type="bibr" rid="B6">2019</xref>). Furthermore, microstate B exhibits a stronger propensity to transition to microstate C (Br&#x000E9;chet et al., <xref ref-type="bibr" rid="B6">2019</xref>), which is linked to the self-experience system. 
The review further finds that class C relates to mind-wandering specifically to self-reflection and self-referential processes (Br&#x000E9;chet et al., <xref ref-type="bibr" rid="B6">2019</xref>; Custo et al., <xref ref-type="bibr" rid="B13">2017</xref>), while class D is linked to executive functioning, including processes such as working memory and attention (Br&#x000E9;chet et al., <xref ref-type="bibr" rid="B6">2019</xref>; Kim et al., <xref ref-type="bibr" rid="B30">2021</xref>).</p>
<p>Emotional states tend to evolve gradually, whereas EEG signals fluctuate rapidly, leading to significant variability in the features derived from them. Consequently, Chen et al. (<xref ref-type="bibr" rid="B9">2021</xref>) propose that examining EEG microstates provides a more nuanced understanding of emotions than conventional EEG analyses. Emotional research has benefited from the successful utilization of microstate analysis (Prete et al., <xref ref-type="bibr" rid="B46">2022</xref>; Chen et al., <xref ref-type="bibr" rid="B9">2021</xref>; Coll et al., <xref ref-type="bibr" rid="B11">2019</xref>), which has the potential to enhance emotion classification (Chen et al., <xref ref-type="bibr" rid="B9">2021</xref>; Shen et al., <xref ref-type="bibr" rid="B57">2020</xref>). The research findings indicate that the four microstates successfully capture the dynamic attributes of emotions (Prete et al., <xref ref-type="bibr" rid="B46">2022</xref>; Hu et al., <xref ref-type="bibr" rid="B24">2023</xref>). However, research investigating the microstates&#x00027; underpinnings of basic emotions in music (especially audio) is very limited. In addition, to ensure consistency and allow precise neurophysiological interpretations in our current investigation, we chose four microstates that have shown reliability in previous research studies (Prete et al., <xref ref-type="bibr" rid="B46">2022</xref>; Hu et al., <xref ref-type="bibr" rid="B24">2023</xref>; Khanna et al., <xref ref-type="bibr" rid="B29">2015</xref>; Michel and Koenig, <xref ref-type="bibr" rid="B38">2018</xref>; Koenig et al., <xref ref-type="bibr" rid="B32">2002</xref>).</p>
<p>Although microstate topographies are believed to be unrelated to oscillatory activity (F&#x000E9;rat et al., <xref ref-type="bibr" rid="B15">2022</xref>) and various approaches (Zulliger et al., <xref ref-type="bibr" rid="B79">2022</xref>), the alpha bands have been identified as the primary driving force behind microstates (Milz et al., <xref ref-type="bibr" rid="B39">2017</xref>). These alpha oscillations can also affect the number of peaks in the global field power (GFP). The periodic nature of EEG microstates is associated with the alpha band rotating phase observed during periods of rest (von Wegner et al., <xref ref-type="bibr" rid="B71">2021</xref>). Multiple studies have demonstrated that the alpha band microstates outperform those of other frequency bands in classifying conditions such as eyes open or eyes closed (F&#x000E9;rat et al., <xref ref-type="bibr" rid="B15">2022</xref>), as well as emotions (Shen et al., <xref ref-type="bibr" rid="B57">2020</xref>). A recent EEG microstate study highlighted the efficacy of the alpha band (8&#x02013;13 Hz) in examining the impact of happy and sad music on the brain (Gupta et al., <xref ref-type="bibr" rid="B21">2025</xref>). Based on this, our investigation focused specifically on the alpha band.</p>
<p>In summary, this study investigates the brain&#x00027;s microstates associated with the fundamental emotions of happiness and sadness within the alpha band. It also seeks to uncover the neural mechanisms underlying the observed cognitive and emotional enhancements during music listening.</p>
<p>As previously discussed, music is known for its ability to influence both emotional states and cognitive functions. Research suggests that listening to happy music can enhance cognitive abilities such as intelligence and attention (Gupta et al., <xref ref-type="bibr" rid="B19">2018</xref>), while sad music often serves as an effective tool for emotional regulation and coping in challenging situations, as well as for improving attention (Gupta et al., <xref ref-type="bibr" rid="B20">2023</xref>).</p>
<p>To achieve the study&#x00027;s objectives, we conducted a comparative microstate analysis across three conditions&#x02014;baseline (BL), music (MUS), and post-music (PMS)&#x02014;for each case while participants listened to happy and sad musical stimuli. We hypothesize that happy music will predominantly affect class D microstates, signifying enhanced attention during the experience of pleasant music. In contrast, sad music is expected to influence both class C and class D microstates, which are associated with self-referential processing (DMN) and attention, respectively.</p></sec>
<sec id="s2">
<title>2 Method</title>
<sec>
<title>2.1 Participants</title>
<p>This study utilized two separate secondary datasets to investigate the effects of happy and sad classical music, respectively. The first dataset (Gupta et al., <xref ref-type="bibr" rid="B19">2018</xref>) consisted of 20 participants with a mean age of 24.06 years (SD = 2.69), who listened to happy classical music. The second dataset (Gupta et al., <xref ref-type="bibr" rid="B20">2023</xref>) consisted of 20 participants with a mean age of 22.14 years (SD = 3.68), who listened to sad classical music following an adverse experience of sad autobiographical recall (SAR) of a negative real-life event in which they experienced sadness such as feelings of loss, loneliness, misunderstanding, heartbreak, betrayal, loss of a loved one, etc. (Gupta et al., <xref ref-type="bibr" rid="B20">2023</xref>; Hanser et al., <xref ref-type="bibr" rid="B23">2016</xref>). Participants in both experiments were enrolled from a technology institute.</p>
<p>The methodology for these steps has been well-documented in the original study, and only relevant processing steps or modifications specific to this study are described below. To be eligible for the study, participants had to meet the criteria of having no formal or informal music training and being right-handed. The literature highlights differences in EEG microstates between musicians and non-musicians. Therefore, trained musicians were excluded from the analysis in the current study to maintain consistency. Exclusion criteria also encompassed hearing disorders, psychopathological diseases, neurological diseases, and recent usage of psychoactive drugs. Additionally, participants in the sad music experiment were screened for any predisposition to depression. This precaution aimed to prevent the maladaptive use of sad music as a coping mechanism for emotion regulation in individuals prone to depression. To minimize potential confounding factors, only male participants were included in both experiments. This decision was based on previous observations of differences in biomarkers for cognitive (Neubauer and Fink, <xref ref-type="bibr" rid="B41">2009</xref>) and emotional processes (Goshvarpour and Goshvarpour, <xref ref-type="bibr" rid="B17">2019</xref>) between male and female participants (Whittle et al., <xref ref-type="bibr" rid="B73">2011</xref>). The studies were duly approved by the Institutional Ethics Committee (IEC) involving human subjects of the Indian Institute of Technology, Kanpur (IEC Communication no: IITK/IEC/2019&#x02013;20/I/18, IITK/IEC/2017&#x02013;18 I/3). Throughout the entire study, adherence to relevant guidelines and regulations was strictly upheld.</p>
</sec>
<sec>
<title>2.2 Stimulus and experimental procedure</title>
<p>The experiments were conducted in a soundproof laboratory to minimize external interference. Participants were seated comfortably, with stereo speakers positioned symmetrically about 2 m away for free-field auditory stimulus delivery. The room was dimly lit to create a calm atmosphere and reduce distractions, ensuring auditory stimuli were the primary focus.</p>
<p>Indian classical music was selected as an experimental stimulus due to its proven effects on cognitive and emotional brain functions (Gupta et al., <xref ref-type="bibr" rid="B19">2018</xref>, <xref ref-type="bibr" rid="B20">2023</xref>). Research shows that Indian Ragas reduce stress, anxiety, and blood pressure (Kar et al., <xref ref-type="bibr" rid="B28">2015</xref>; Siritunga et al., <xref ref-type="bibr" rid="B58">2013</xref>), while enhancing life satisfaction and optimism (Gupta and Gupta, <xref ref-type="bibr" rid="B22">2016</xref>). Previous EEG studies have demonstrated their ability to modulate neural activity (Gupta et al., <xref ref-type="bibr" rid="B19">2018</xref>, <xref ref-type="bibr" rid="B20">2023</xref>), making them ideal for exploring their impact on the brain&#x00027;s microstates. The stimulus utilized for our investigation was performed by skilled professional musicians (Gupta et al., <xref ref-type="bibr" rid="B19">2018</xref>, <xref ref-type="bibr" rid="B20">2023</xref>).</p>
<p>The first experiment investigated the effects of listening to happy music. It comprised three distinct states: a baseline resting state (duration: 275 s), a music listening state involving participants attentively hearing the happy music with their eyes closed, and finally a post-music silence state (duration: 275 s). Raga Darbari segment (duration: 9 min and 53 s) was used as the happy musical stimulus. Participants also rated their mood on an 11-point Likert scale upon listening to Raga Darbari during the experiment.</p>
<p>The second experiment investigated the effects of listening to sad music during an adverse situation. It encompassed four distinct conditions of 9 min each. First, there was a baseline resting state. Following that, participants engaged in a SAR condition, where they recalled a personal episode that evoked sadness. Subsequently, participants listened to sad music. Finally, there was a post-music silence condition. The Mishra Raga Jogiya segment (duration: 8 min and 44 s) was used as the sad musical stimulus. During the baseline, sad music listening, and post music silence conditions, participants were instructed to maintain a calm seated position while focusing their gaze on a centrally printed cross displayed on a blank sheet of paper. However, during the SAR condition, the cross was substituted with a writing pad. In this condition, they were encouraged to vividly and in detail report the real-life episode that evoked feelings of sadness, encompassing experiences such as loss, loneliness, heartbreak, betrayal, etc. (Hanser et al., <xref ref-type="bibr" rid="B23">2016</xref>) in the writing pad while supporting their elbow to minimize hand movements. Furthermore, participants were instructed to minimize any movement, including eye, head, and body movements, to minimize artifacts during the task while performing it in a natural manner.</p>
<p>Participants evaluated the vividness and reliving of autobiographical recall on a five-point scale. They also evaluated their mood on an 11-point Likert scale during the three states. Following the EEG experiment, participants completed a standard Self-Regulatory Goals Assessment questionnaire to assess self-regulatory goals upon sad raga listening. Additionally, they rated the efficiency of the sad musical stimulus in managing the SAR condition on an 11-point bidirectional scale with a range from &#x02013;5 to &#x0002B;5.</p>
</sec>
<sec>
<title>2.3 EEG recording and preprocessing</title>
<p>In both studies, the EEG signals from the participants were recorded using a g.HIamp bio-signal amplifier (Guger Technologies, OG, Graz, Austria). The EEG data were recorded at a sampling frequency of 512 Hz, and it was collected from 32 scalp positions following the International 10-20 system. The impedance level was maintained below 5 Kohms. To ensure appropriate signal quality, the EEG data was band-pass filtered between 0.01 and 100 Hz. In addition, EEG data were also recorded from four electrooculography (EOG) positions, including the upper and lower right eye and the outer canthus locations of both eyes, to detect and eliminate any artifacts caused by eye blinks.</p>
<p>EEG preprocessing was performed using the EEGLAB toolbox (Delorme and Makeig, <xref ref-type="bibr" rid="B14">2004</xref>). To enhance data processing, EEG data were down-sampled to a frequency of 256 Hz, and a high-pass filter with a 0.5 Hz cutoff was employed to eliminate any DC drift present in the signals. Visual inspection was performed to identify and mark any artifacts resulting from eye movements, muscle activity, or electrode movement. Bad electrodes were identified and interpolated to improve data quality. The EEG data were average-referenced. Independent Component Analysis (ICA) and SASICA (Semi-Automatic Selection of Independent Component Analysis) were employed to further eliminate artifacts caused by eye and muscle movements (Chaumon et al., <xref ref-type="bibr" rid="B8">2015</xref>; Crespo-Garcia et al., <xref ref-type="bibr" rid="B12">2008</xref>). EEG data were filtered to obtain the alpha band between 8 and 13 Hz. After excluding participants with high EEG artifacts and previous exposure to music, we were left with a total of 15 participants for each experiment, who were included in the subsequent microstate analysis.</p>
</sec>
<sec>
<title>2.4 Microstate analysis</title>
<p>We conducted a spatial k-means cluster analysis using the EEGLAB toolbox for each condition (Poulsen et al., <xref ref-type="bibr" rid="B45">2018</xref>). The analysis utilized maps based on the local maxima of the GFP, which identifies time points characterized by the largest signal-to-noise ratio. The analysis did not consider the polarity of the maps. The microstate cluster analysis was conducted on the combined EEG data of all participants within each condition. Brain microstate maps are typically categorized into classes A, B, C, and D based on their topological orientations (Koenig et al., <xref ref-type="bibr" rid="B31">1999</xref>). Specifically, microstate map A has a left-right orientation, map B shows a right-left orientation, map C displays an anterior-posterior orientation, and map D has a fronto-central maximum. This labeling convention has been consistently used in various studies (Michel and Koenig, <xref ref-type="bibr" rid="B38">2018</xref>; Hu et al., <xref ref-type="bibr" rid="B24">2023</xref>; Pal et al., <xref ref-type="bibr" rid="B43">2021</xref>; Liu et al., <xref ref-type="bibr" rid="B34">2021</xref>; Pascual-Marqui et al., <xref ref-type="bibr" rid="B44">2014</xref>) (<xref ref-type="supplementary-material" rid="SM1">Supplementary Figures S2</xref>&#x02013;<xref ref-type="supplementary-material" rid="SM1">S4</xref>). In our research, we followed this convention and classified the microstates into classes A, B, C, and D according to their topographical orientations as described initially by Koenig et al. (<xref ref-type="bibr" rid="B31">1999</xref>), in line with subsequent studies (Hu et al., <xref ref-type="bibr" rid="B24">2023</xref>; Pal et al., <xref ref-type="bibr" rid="B43">2021</xref>; Liu et al., <xref ref-type="bibr" rid="B34">2021</xref>; Pascual-Marqui et al., <xref ref-type="bibr" rid="B44">2014</xref>).</p>
<p>Additionally, we calculated the spatial correlation among the four microstates of the brain under different conditions. Once the maps were identified for each condition, they were applied to each participant&#x00027;s EEG data within that specific condition. Each frame of time in the EEG data was assigned to the template that exhibited the best spatial correlation match. This procedure produced a microstate sequence unique to each participant, and these sequences were subsequently employed to compute participant-specific microstate parameters for each condition. <xref ref-type="fig" rid="F1">Figure 1</xref> illustrates the microstate analysis procedure applied to each participant under each condition.</p>
<list list-type="order">
<list-item><p>GFP: It serves as a reference-independent measure, representing the magnitude of the scalp electric field. GFP is equivalent to the spatial standard deviation of voltage amplitude and is typically measured in micro-volts (&#x003BC;V) (Murray et al., <xref ref-type="bibr" rid="B40">2008</xref>; Skrandies, <xref ref-type="bibr" rid="B59">1990</xref>).</p></list-item>
<list-item><p>GEV: This parameter quantifies the degree to which the selected template effectively represents the entire dataset. It is computed by summing the explained variances, with each value weighted according to the corresponding GFP at each time point (Murray et al., <xref ref-type="bibr" rid="B40">2008</xref>).
<disp-formula id="E1"><mml:math id="M1"><mml:msub><mml:mrow><mml:mstyle mathvariant='bold'><mml:mtext>GEV</mml:mtext></mml:mstyle></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mtext class="textrm" mathvariant="normal">corr</mml:mtext><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>s</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>m</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>l</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mfrac><mml:mrow><mml:mi>G</mml:mi><mml:mi>F</mml:mi><mml:msubsup><mml:mrow><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mrow><mml:mrow><mml:mstyle displaystyle="true"><mml:msubsup><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x02032;</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msubsup></mml:mstyle><mml:mi>G</mml:mi><mml:mi>F</mml:mi><mml:msubsup><mml:mrow><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x02032;</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:mrow></mml:mfrac></mml:math></disp-formula>
In this context, GFP<sub><italic>t</italic></sub> represents the global field power for the <italic>t</italic><sup><italic>th</italic></sup> time sample. The variable <italic>s</italic><sub><italic>t</italic></sub> denotes EEG data corresponding to the <italic>t</italic><sup><italic>th</italic></sup> time, <italic>l</italic><sub><italic>t</italic></sub> signifies the label of the microstate of <italic>t</italic><sup><italic>th</italic></sup> EEG data, <italic>m</italic><sub><italic>l</italic><sub><italic>t</italic></sub></sub> stands for the microstate map corresponding to the <inline-formula><mml:math id="M2"><mml:msubsup><mml:mrow><mml:mi>l</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msubsup></mml:math></inline-formula>, and <italic>T</italic> is the total time period.</p></list-item>
<list-item><p>Coverage: It represents the percentage of time frames in which a particular microstate is present, indicating the relative duration of its activation (Khanna et al., <xref ref-type="bibr" rid="B29">2015</xref>; Murray et al., <xref ref-type="bibr" rid="B40">2008</xref>).</p></list-item>
<list-item><p>Occurrence: The mean number of times the microstate is observed within a 1-s period. It reflects the tendency of intracortical sources to synchronize their activation and is measured in Hertz (Hz) (Khanna et al., <xref ref-type="bibr" rid="B29">2015</xref>).</p></list-item>
<list-item><p>Duration: The mean temporal duration pertains to the average time span over which consecutive maps are attributed to the same microstate class (Khanna et al., <xref ref-type="bibr" rid="B29">2015</xref>).</p></list-item>
</list>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>A schematic of the microstate analysis process: <bold>(a)</bold> preprocessed EEG data, <bold>(b)</bold> high SNR topographies extracted from GFP maxima, <bold>(c)</bold> clustering for reliable microstate map detection, and <bold>(d)</bold> mapping microstates back to EEG data, assigning each time point to a dominant state, followed by feature computation.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1472689-g0001.tif"/>
</fig>
<p>For correlation analysis during music listening, we divided the duration of the MUS condition into 10 segments. Microstates parameters calculated for each segment for each participant were utilized for correlation analysis. For comparative microstate analysis between BL, MUS, and PMS conditions, we selected 200 s of segment duration from each condition in both experiments (<xref ref-type="supplementary-material" rid="SM1">Supplementary Figure S1</xref>).</p>
</sec>
<sec>
<title>2.5 Statistical analysis</title>
<p>To investigate the effects of microstate class on parameters such as global explained variance (GEV), occurrence, duration, global field power (GFP), and coverage during music listening, a one-way repeated measures ANOVA was conducted using SPSS, with microstate class treated as a within-subject factor. Additionally, to examine the combined effects of microstate class and experimental conditions, a two-way repeated measures ANOVA was performed, considering both factors as within-subject variables.</p>
<p>The mean values of the above variables (<italic>Post hoc</italic> analysis) and the subjective questionnaire scores were compared using a two-tailed <italic>t-</italic>test at a significance level of (&#x003B1;) = 0.05, and false discovery rate (FDR) correction was applied to address issues related to multiple comparisons. Furthermore, correlation analyses were conducted between Class C and Class D microstates for parameters including GEV, coverage, and GFP during music listening.</p>
</sec>
</sec>
<sec sec-type="results" id="s3">
<title>3 Results</title>
<sec>
<title>3.1 Experiment 1</title>
<sec>
<title>3.1.1 Microstate analysis for a happy Indian raga</title>
<p>We performed microstate analysis for the full duration of Raga Darbari music. <xref ref-type="fig" rid="F2">Figure 2a</xref> shows the four microstates underpinning raga darbari that explain 77.4% of GEV. The microstates are arranged according to the standard convention of classes A-D. The microstate maps were fitted back into the EEG data of the participants, yielding various parameters such as GEV, coverage, occurrence, duration, and inter-microstate transition probability.</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>Microstate analysis for happy Indian raga. <bold>(a)</bold> Four EEG microstates underpinning music (MUS) conditions. <bold>(b)</bold> Relative GEV for each microstate class during music listening. <bold>(c)</bold> Relative GFP in each microstate class during music listening. <bold>(d)</bold> Correlation between the microstate class D and class C for GEV. <bold>(e)</bold> Correlation between the microstate class D and class C for Coverage. <bold>(f)</bold> Correlation between the microstate class D and class C for GFP (**FDR corrected, <italic>p</italic> &#x0003C; 0.05; error bars = 1 SD).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1472689-g0002.tif"/>
</fig>
<p><bold>GEV</bold>: We applied a one-way repeated ANOVA to examine the effect of microstate class on GEV. The results show a significant effect with a Greenhouse-Geisser correction (F<sub>1.741, 24.379</sub> = 12.246, <italic>p</italic> &#x0003C; 0.001). The <italic>post hoc</italic> paired <italic>t</italic>-test shows that the microstate of class D is significantly higher compared to class A (t = 4.4643, df = 14, <italic>p</italic> &#x0003C; 0.005, d = 1.1527), class B (t = 4.8103, df = 14, <italic>p</italic> &#x0003C; 0.005, d = 1.2420), and class C (t = 3.3471, df = 14, <italic>p</italic> &#x0003C; 0.05, d = 0.8642) during happy music listening as shown in <xref ref-type="fig" rid="F2">Figure 2b</xref>.</p>
<p><bold>GFP</bold>: One-way repeated ANOVA with a Greenhouse-Geisser correction indicates a significant effect of microstate class on the GFP (F<sub>1.797, 25.163</sub> = 28.452, <italic>p</italic> &#x0003C; 0.001). <italic>Post hoc</italic> paired <italic>t</italic>-test shows that class D microstate has significantly higher GFP than class A (t = 7.1884, df = 14, <italic>p</italic> &#x0003C; 0.001, d = 1.8560), class B (t = 8.0431, df = 14, <italic>p</italic> &#x0003C; 0.001, d = 2.0767), and class C (t = 5.1199, df = 14, <italic>p</italic> &#x0003C; 0.001, d = 1.3220) during happy music listening, as shown in <xref ref-type="fig" rid="F2">Figure 2c</xref>. One-way repeated ANOVA effects of coverage, occurrence, duration, and inter-microstate transition probability were not significant. The results also showed a significant negative correlation between microstate class C and class D for GEV (r = &#x02013;0.72, <italic>p</italic> &#x0003C; 0.001) as shown in <xref ref-type="fig" rid="F2">Figure 2d</xref>, for coverage (r = &#x02013;0.81, <italic>p</italic> &#x0003C; 0.001) as shown in <xref ref-type="fig" rid="F2">Figure 2e</xref>, and positive correlation between microstate class C and class D for GFP (r = 0.9, <italic>p</italic> &#x0003C; 0.001) as shown in <xref ref-type="fig" rid="F2">Figure 2f</xref>.</p></sec>
<sec>
<title>3.1.2 Comparative microstate analysis among baseline resting state (BL), music (MUS), and post-music silence (PMS)</title>
<p><xref ref-type="fig" rid="F3">Figure 3</xref> shows four microstates that explained the variance (GEV) of 75.5%, 77.4%, and 74.43% during BL, MUS, and PMS conditions, respectively, for experiment 1. The underpinning microstates for the three conditions are arranged according to the standard convention. Results show strong spatial correlation of 0.9 among all the conditions for the corresponding microstate classes A-D (<italic>p</italic> &#x0003C; 0.001). To compare the conditions of BL, MUS, and PMS, 200 s of time duration for each condition was selected for further investigation (<xref ref-type="supplementary-material" rid="SM1">Supplementary Figure S1</xref>).</p>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>Microstate maps. Four EEG microstates under baseline (BL), music (MUS), and post-music silence (PMS) conditions. Spatial correlation between the corresponding microstate class across conditions.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1472689-g0003.tif"/>
</fig>
<p><bold>GEV</bold>: A two-way repeated ANOVA with microstate class and condition as within factors shows a significant interaction with a Greenhouse-Geisser correction (F<sub>2.933, 41.068</sub> = 7.474, <italic>p</italic> &#x0003C; 0.001). The one-way follow-up repeated ANOVA shows a significant effect on GEV of class B (F<sub>2, 28</sub> = 5.015, <italic>p</italic> &#x0003C; 0.05), class C (F<sub>2, 28</sub> = 12.960, <italic>p</italic> &#x0003C; 0.001), and class D (F<sub>2, 28</sub> = 9.104, p = 0.001). Further <italic>post hoc</italic> paired <italic>t</italic>-test with FDR correction shows class B microstate during BL condition to have significantly higher GEV than class B during MUS (t = 3.5659, df = 14, <italic>p</italic> &#x0003C; 0.05, d = 0.9207) condition as shown in <xref ref-type="fig" rid="F4">Figure 4a</xref>. Class C microstate during BL condition has significantly higher GEV than class C during MUS (t = 5.7033, df = 14, <italic>p</italic> &#x0003C; 0.0005, d = 1.4726) and PMS (t = 3.3379, df = 14, <italic>p</italic> &#x0003C; 0.05, d = 0.8618) conditions as shown in <xref ref-type="fig" rid="F4">Figure 4b</xref>. The results also showed that the class D microstate during the MUS condition had a significantly enhanced GEV than that during BL (t = 3.4757, df = 14, <italic>p</italic> &#x0003C; 0.05, d = 0.8974) and PMS (t = 2.8331, df = 14, <italic>p</italic> &#x0003C; 0.05, d = 0.7315) condition as shown in <xref ref-type="fig" rid="F4">Figure 4c</xref>.</p>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p>Microstates&#x00027; properties. <bold>(a)</bold> Relative GEV of microstates class B across conditions. <bold>(b)</bold> Relative GEV of microstates class C across conditions. <bold>(c)</bold> Relative GEV of microstates class D across conditions. <bold>(d)</bold> Relative occurrence of microstates class A across conditions. <bold>(e)</bold> Relative occurrence of microstates class B across conditions. <bold>(f)</bold> Relative occurrence of microstates class C across conditions (**FDR corrected, <italic>p</italic> &#x0003C; 0.05; error bars = 1 SD).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1472689-g0004.tif"/>
</fig>
<p><bold>Occurrence</bold>: A two-way repeated ANOVA with microstate class and condition as within factors yielded a significant interaction (F<sub>6, 84</sub> = 4.995, <italic>p</italic> &#x0003C; 0.001). Follow-up one-way repeated ANOVA shows a significant effect on the occurrence of class A (F<sub>2, 28</sub> = 5.263, <italic>p</italic> &#x0003C; 0.011), class B (F<sub>2, 28</sub> = 5.943, <italic>p</italic> &#x0003C; 0.01), and class C (F<sub>2, 28</sub> = 8.138, <italic>p</italic> &#x0003C; 0.01) microstates. Further <italic>post hoc</italic> paired <italic>t</italic>-test with FDR correction shows class A microstate during BL condition to have significantly higher occurrence compared to class A during MUS (t = 2.5521, df = 14, <italic>p</italic> &#x0003C; 0.05, d = 0.6589) and PMS (t = 2.6895, df = 14, <italic>p</italic> &#x0003C; 0.05, d = 0.6944) conditions as shown in <xref ref-type="fig" rid="F4">Figure 4d</xref>. Class B microstate during the MUS condition has a significantly lower occurrence than class B during BL (t = &#x02013;3.8205, df = 14, <italic>p</italic> &#x0003C; 0.01, d = &#x02013;0.9865) and PMS (t = &#x02013;2.2660, df = 14, <italic>p</italic> &#x0003C; 0.05, d = &#x02013;0.5851) conditions as shown in <xref ref-type="fig" rid="F4">Figure 4e</xref>. The results also showed that the class C microstate during the BL condition had a significantly increased occurrence compared to that during MUS (t = 4.2302, df = 14, <italic>p</italic> &#x0003C; 0.01, d = 1.0922) and PMS (t = 2.5806, df = 14, <italic>p</italic> &#x0003C; 0.05, d = 0.6663) conditions as shown in <xref ref-type="fig" rid="F4">Figure 4f</xref>.</p>
<p><bold>GFP</bold>: A two-way repeated ANOVA with microstate class and condition as within factors resulted in a significant interaction effect (F<sub>6, 84</sub> = 9.825, <italic>p</italic> &#x0003C; 0.001). Follow-up one-way repeated ANOVA shows a significant effect on GFP of class A (F<sub>2, 28</sub> = 4.411, <italic>p</italic> &#x0003C; 0.05) and class D (F<sub>2, 28</sub> = 5.484, p = 0.01) microstates. Further <italic>post hoc</italic> paired <italic>t</italic>-test with FDR correction shows class A microstate during MUS condition to have significantly higher GFP than class A during BL condition (t = 2.9839, df = 14, <italic>p</italic> &#x0003C; 0.05, d = 0.7704) as shown in <xref ref-type="fig" rid="F5">Figure 5a</xref>. Class D microstate during MUS condition has significantly higher GFP than class D during BL condition (t = 3.8781, df = 14, <italic>p</italic> &#x0003C; 0.05, d = 1.0013) as shown in <xref ref-type="fig" rid="F5">Figure 5b</xref>.</p>
<fig id="F5" position="float">
<label>Figure 5</label>
<caption><p><bold>(a)</bold> Relative GFP of microstate class A across conditions. <bold>(b)</bold> Relative GFP of microstate class D across conditions. <bold>(c)</bold> Relative mean duration of all microstates across conditions. <bold>(d)</bold> Depicts the average subjective mood assessment before and after listening to music (**FDR corrected, <italic>p</italic> &#x0003C; 0.05; error bars = 1 SD).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1472689-g0005.tif"/>
</fig>
<p><bold>Duration</bold>: A two-way repeated ANOVA with microstate class and condition as within factors did not yield a significant interaction. However, we obtained a simple effect of conditions with a Greenhouse-Geisser correction (F<sub>1.434, 41.016</sub> = 3.859, <italic>p</italic> = 0.05). Further <italic>post hoc</italic> paired <italic>t</italic>-test with FDR correction shows the mean value of the duration of all microstates during the MUS condition to be significantly higher than during the BL condition (t = 3.2203, df = 14, <italic>p</italic> &#x0003C; 0.05, d = 0.8315) as shown in <xref ref-type="fig" rid="F5">Figure 5c</xref>.</p>
<p><bold>Subjective ratings</bold>: Raga darbari segment significantly expressed happiness (t = &#x02013;9.5232, df = 14, <italic>p</italic> &#x0003C; 0.001, d = &#x02013;2.4589)(Gupta et al., <xref ref-type="bibr" rid="B19">2018</xref>) in the participants as shown in <xref ref-type="fig" rid="F5">Figure 5d</xref>.</p>
</sec>
</sec>
<sec>
<title>3.2 Experiment 2</title>
<sec>
<title>3.2.1 Microstate analysis for a sad Indian raga</title>
<p>We conducted microstate analysis for the full duration of Raga Mishra jogiya. <xref ref-type="fig" rid="F6">Figure 6a</xref> shows the four microstates underpinning the raga that explain 77% of GEV. The microstates are arranged according to the standard convention of class A-D. The microstate maps were fitted back into the EEG data of the participants, yielding various parameters such as GEV, coverage, occurrence, duration, and inter-microstate transition probability.</p>
<fig id="F6" position="float">
<label>Figure 6</label>
<caption><p>Microstate analysis for sad Indian raga. <bold>(a)</bold> Four EEG microstates under MUS conditions. <bold>(b)</bold> Relative GEV in each microstate class during music listening. <bold>(c)</bold> Relative GFP in each microstate class during music listening. <bold>(d)</bold> Correlation between the microstate class D and class C for GEV, <bold>(e)</bold> Correlation between the microstate class D and class C for Coverage, and <bold>(f)</bold> correlation between the microstate class D and class C for GFP (**FDR corrected, <italic>p</italic> &#x0003C; 0.05; error bars = 1 SD).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1472689-g0006.tif"/>
</fig>
<p><bold>GEV</bold>: We applied a one-way repeated ANOVA to examine the effect of microstate class on GEV. The results show a significant effect with a Greenhouse-Geisser correction (F<sub>1.656, 23.177</sub> = 7.719, <italic>p</italic> &#x0003C; 0.005). The <italic>post hoc</italic> paired <italic>t</italic>-test shows significantly higher presence of microstate of class D than class A (t = 4.3145, df = 14, <italic>p</italic> &#x0003C; 0.005, d = 1.1140) and class B (t = 2.4011, df = 14, <italic>p</italic> &#x0003C; 0.05, d = 0.6200). Class C and class B microstates are significantly higher as compared to class A microstate with (t = 3.3963, df = 14, <italic>p</italic> &#x0003C; 0.01, d = 0.8769) and (t = 5.4788, df = 14, <italic>p</italic> &#x0003C; 0.005, d = 1.4146), respectively, during sad music listening as shown in <xref ref-type="fig" rid="F6">Figure 6b</xref>.</p>
<p><bold>GFP</bold>: One-way repeated ANOVA with a Greenhouse-Geisser correction indicates a significant effect of microstate class on the GFP (F<sub>1.911, 26.751</sub> = 11.126, <italic>p</italic> &#x0003C; 0.001). <italic>Post hoc</italic> paired <italic>t</italic>-test shows that the microstate of class D is significantly higher in GFP than class A (t = 5.2858, df = 14, <italic>p</italic> &#x0003C; 0.001, d = 1.3648) and class B (t = 3.6980, df = 14, <italic>p</italic> &#x0003C; 0.005, d = 0.9548). Class C and class B also have significantly higher GFP than class A with (t = 3.4829, df = 14, <italic>p</italic> &#x0003C; 0.005, d = 0.8993) and (t = 5.1187, df = 14, <italic>p</italic> &#x0003C; 0.001, d = 1.3217), respectively, during sad music listening as shown in <xref ref-type="fig" rid="F6">Figure 6c</xref>. We did not observe a significant effect of coverage, occurrence, duration, and inter-microstate transition probability. The results also showed a significant negative correlation between microstate class C and class D for GEV (r = &#x02013;0.55, <italic>p</italic> &#x0003C; 0.001) as shown in <xref ref-type="fig" rid="F6">Figure 6d</xref>, for coverage (r = &#x02013;0.57, <italic>p</italic> &#x0003C; 0.001) as shown in <xref ref-type="fig" rid="F6">Figure 6e</xref>, and a positive correlation between microstate class C and class D for GFP (r = 0.94, <italic>p</italic> &#x0003C; 0.001) as shown in <xref ref-type="fig" rid="F6">Figure 6f</xref>.</p></sec>
<sec>
<title>3.2.2 Comparative microstate analysis between BL, MUS, and PMS</title>
<p><xref ref-type="fig" rid="F7">Figure 7</xref> shows four microstates that explained the variance (GEV) of 77.95%, 77.77%, and 76.98% during BL, MUS, and PMS conditions, respectively, for experiment 2. The underpinning microstates for the three conditions are arranged according to the standard convention. Results show strong spatial correlation of 0.9 among all the conditions for the corresponding microstate classes A&#x02013;D (<italic>p</italic> &#x0003C; 0.001). To compare the conditions of BL, MUS, and PMS, 200 s duration was selected for further investigation (<xref ref-type="supplementary-material" rid="SM1">Supplementary Figure S1</xref>).</p>
<fig id="F7" position="float">
<label>Figure 7</label>
<caption><p>Microstate maps. Four EEG microstates under BL, MUS, and PMS conditions. Spatial correlation between the corresponding microstate class across conditions.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1472689-g0007.tif"/>
</fig>
<p><bold>GEV</bold>: A two-way repeated ANOVA with microstate class and condition as within factors shows a significant interaction with a Greenhouse-Geisser correction (F<sub>3.1, 43.406</sub> = 3.251, <italic>p</italic> &#x0003C; 0.05). Follow-up one-way repeated ANOVA shows a significant effect on GEV of class C (F<sub>2, 28</sub> = 4.036, <italic>p</italic> &#x0003C; 0.05) and class D (F<sub>2, 28</sub> = 6.236, <italic>p</italic> &#x0003C; 0.01) microstates.</p>
<p>Further <italic>post hoc</italic> paired <italic>t</italic>-test with FDR correction shows class C microstate during PMS condition to have significantly higher GEV than class C during BL (t = 3.4570, df = 14, <italic>p</italic> &#x0003C; 0.05, d = 0.8994) condition as shown in <xref ref-type="fig" rid="F8">Figure 8a</xref>. Class D microstate during MUS condition has significantly higher GEV than class D during BL [t = 2.0696, df = 14, <italic>p</italic> &#x0003C; 0.05 (uncorrected), d = 0.5344] and PMS (t = 3.9638, df = 14, <italic>p</italic> &#x0003C; 0.01, d = 1.0234) conditions as shown in <xref ref-type="fig" rid="F8">Figure 8b</xref>.</p>
<fig id="F8" position="float">
<label>Figure 8</label>
<caption><p>Microstate parameters. <bold>(a)</bold> Relative GEV of microstates class C across conditions. <bold>(b)</bold> Relative GEV of microstates class D across conditions. <bold>(c)</bold> Relative mean duration of all microstates across conditions. <bold>(d)</bold> Relative mean GFP of all microstates across conditions (*uncorrected,**FDR corrected, <italic>p</italic> &#x0003C; 0.05; error bars = 1 SD).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1472689-g0008.tif"/>
</fig>
<p><bold>Duration</bold>: Two-way repeated ANOVA with microstate class and condition as within factors did not show a significant interaction. However, we obtained a simple effect of conditions (F<sub>2, 84</sub> = 12.702, <italic>p</italic> &#x0003C; 0.001). Further <italic>post hoc</italic> paired <italic>t</italic>-test with FDR correction shows the mean value of the duration of all microstates during the MUS condition to be significantly higher than BL (t = 5.4489, df = 14, <italic>p</italic> &#x0003C; 0.001, d = 1.4069) and PMS conditions (t = 4.2820, df = 14, <italic>p</italic> &#x0003C; 0.01, d = 1.1056) as shown in <xref ref-type="fig" rid="F8">Figure 8c</xref>.</p>
<p><bold>GFP</bold>: We administered a two-way repeated ANOVA with microstate class and condition as within factors. Although the interaction was not significant, we obtained the simple effect of conditions (F<sub>2, 28</sub> = 11.220, <italic>p</italic> &#x0003C; 0.001). Further <italic>post hoc</italic> paired <italic>t</italic>-test with FDR correction shows the mean GFP of all microstates during the MUS condition to be significantly higher compared to BL condition (t = 4.0834, df = 14, <italic>p</italic> &#x0003C; 0.01, d = 1.0543) and PMS condition (t = 2.9317, df = 14, <italic>p</italic> &#x0003C; 0.05, d = 0.7570) as shown in <xref ref-type="fig" rid="F8">Figure 8d</xref>.</p>
<p><bold>Occurrence</bold>: We administered a two-way repeated ANOVA with microstate class and condition as within factors. The results show a significant interaction with a Greenhouse-Geisser correction (F<sub>3.299, 46.188</sub> = 3.122, <italic>p</italic> &#x0003C; 0.05). The one-way follow-up repeated ANOVA shows a significant effect on the occurrence of class A with a Greenhouse-Geisser correction (F<sub>1.369, 19.164</sub> = 9.047, <italic>p</italic> &#x0003C; 0.005), class B (F<sub>2, 28</sub> = 8.894, <italic>p</italic> = 0.001), class C (F<sub>2, 28</sub> = 10.730, <italic>p</italic> &#x0003C; 0.001), and class D (F<sub>2, 28</sub> = 5.313, <italic>p</italic> &#x0003C; 0.05). Further <italic>post hoc</italic> paired <italic>t</italic>-test with FDR correction shows class A microstate during MUS condition to have significantly lower occurrence than class A during BL (t = &#x02013;3.6758, df = 14, <italic>p</italic> &#x0003C; 0.01, d = &#x02013;0.9491) and PMS (t = &#x02013;3.4482, df = 14, <italic>p</italic> &#x0003C; 0.01, d = &#x02013;0.8903) conditions as shown in <xref ref-type="fig" rid="F9">Figure 9a</xref>. The class B microstate during the MUS condition has significantly lower occurrence compared to class B during BL (t = &#x02013;3.9214, df = 14, <italic>p</italic> &#x0003C; 0.01, d = &#x02013;1.0125) and PMS (t = &#x02013;4.1434, df = 14, <italic>p</italic> &#x0003C; 0.01, d = &#x02013;1.0698) conditions as shown in <xref ref-type="fig" rid="F9">Figure 9b</xref>. The class C microstate during the MUS condition has significantly lower occurrence than class C during BL (t = &#x02013;3.7033, df = 14, <italic>p</italic> &#x0003C; 0.01, d = &#x02013;0.9562) and PMS (t = &#x02013;4.1552, df = 14, <italic>p</italic> &#x0003C; 0.01, d = &#x02013;1.0729) conditions as shown in <xref ref-type="fig" rid="F9">Figure 9c</xref>. 
The class D microstate during the MUS condition has significantly lower occurrence than class D during BL (t = &#x02013;3.0188, df = 14, <italic>p</italic> &#x0003C; 0.05, d = &#x02013;0.7795) as shown in <xref ref-type="fig" rid="F9">Figure 9d</xref>.</p>
<fig id="F9" position="float">
<label>Figure 9</label>
<caption><p>Microstate parameters. <bold>(a)</bold> Relative occurrence of microstates class A across conditions. <bold>(b)</bold> Relative occurrence of microstates class B across conditions. <bold>(c)</bold> Relative occurrence of microstates class C across conditions. <bold>(d)</bold> Relative occurrence of microstates class D across conditions (**FDR corrected, <italic>p</italic> &#x0003C; 0.05; error bars = 1 SD).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1472689-g0009.tif"/>
</fig>
<p><bold>Subjective ratings</bold>: The subjective ratings of memories recalled during SAR revealed mean scores of 4.2 (SD = 0.67) for vividness, 4.13 (SD = 0.74) for reliving, and 14.2 months (SD = 10.3) for the age of the memory. <xref ref-type="fig" rid="F10">Figure 10a</xref> shows mood assessment by the participants with a mean score of 3.9 (SD = 0.7) during the SAR state and 3.9 (SD = 1.3) during sad music listening, compared to the baseline mean score of 0.4 (SD = 1.9). The differences were significant for both SAR (t = &#x02013;8.663, df = 14, <italic>p</italic> &#x0003C; 0.001, d = &#x02013;2.236) and sad music (t = &#x02013;6.094, df = 14, <italic>p</italic> &#x0003C; 0.001, d = &#x02013;1.5735) when compared to the BL state. No significant difference was found between the SAR and sad music conditions. Re-experiencing emotions was the predominant self-regulatory goal during sad music, with a mean of 3.7917 (SD = 0.7858). Other self-regulatory goals observed included a mean of 3.333 (SD = 0.8772) for memory, 2.7778 (SD = 1.0209) for distraction, 3.0444 (SD = 0.9666) for cognition, and 3.2333 (SD = 0.6974) for friendship (<xref ref-type="fig" rid="F10">Figure 10b</xref>). Participants also unanimously reported positive experiences upon listening to sad music, with a mean score of 3.733 (SD = 0.7037), post SAR as shown in <xref ref-type="fig" rid="F10">Figure 10c</xref>.</p>
<fig id="F10" position="float">
<label>Figure 10</label>
<caption><p>Subjective ratings. <bold>(a)</bold> Depicts average rating of the experiences across all states: BL, SAR, and listening to sad music, <bold>(b)</bold> shows the subjective ratings for self-regulatory goals during MUS condition, and <bold>(c)</bold> shows the average assessment of sad music listening. Findings indicate a positive experience while listening to sad music. Error bars represent one standard deviation (error bars = 1 SD).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1472689-g0010.tif"/>
</fig>
</sec>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<title>4 Discussion</title>
<p>Music has been recognized for its ability to influence emotions and cognitive processes. Happy music has been found to boost intelligence (Rauscher et al., <xref ref-type="bibr" rid="B48">1993</xref>; Rideout and Laubach, <xref ref-type="bibr" rid="B50">1996</xref>) and attention (Putkinen et al., <xref ref-type="bibr" rid="B47">2017</xref>; Markovic et al., <xref ref-type="bibr" rid="B36">2017</xref>; J&#x000E4;ncke et al., <xref ref-type="bibr" rid="B26">2015</xref>), while sad music has been used to regulate emotions and cope with challenging situations (Van den Tol et al., <xref ref-type="bibr" rid="B69">2016</xref>; Van den Tol and Edwards, <xref ref-type="bibr" rid="B68">2013</xref>; Hanser et al., <xref ref-type="bibr" rid="B23">2016</xref>). However, the specific brain microstates associated with these effects have not been fully understood. This study aimed to comparatively investigate the underlying microstates that contribute to the observed effects when listening to happy and sad Indian raga, in relation to BL and PMS conditions.</p>
<p>The first experiment investigated the effect of happy music listening. The subjective assessment shows that the musical stimulus successfully induced a moderate degree of happiness in the participants (<xref ref-type="fig" rid="F5">Figure 5d</xref>). Furthermore, we obtained four microstates that explained the variance (GEV) of 75.5, 77.4, and 74.43 during BL, MUS, and PMS conditions, respectively. These findings are illustrated in <xref ref-type="fig" rid="F2">Figures 2a</xref>, <xref ref-type="fig" rid="F3">3</xref>. The results revealed that the spatial distribution of these four microstates resembled those of the four classical microstates (Wang et al., <xref ref-type="bibr" rid="B72">2021</xref>; Hu et al., <xref ref-type="bibr" rid="B24">2023</xref>; Pascual-Marqui et al., <xref ref-type="bibr" rid="B44">2014</xref>; Gu et al., <xref ref-type="bibr" rid="B18">2022</xref>), including earlier microstate studies involving musical stimulus (Hu et al., <xref ref-type="bibr" rid="B24">2023</xref>; Gupta et al., <xref ref-type="bibr" rid="B21">2025</xref>).</p>
<p>The analysis of GEV and GFP during the course of happy music listening indicates that class D microstate exhibits significantly higher presence and increased electrical activity than all other microstates (<xref ref-type="fig" rid="F1">Figures 1b</xref>, <xref ref-type="fig" rid="F1">c</xref>). Furthermore, the analysis of GEV and coverage for correlation between class C and class D microstates demonstrates a robust negative association, consistent with previous research (Braboszcz and Delorme, <xref ref-type="bibr" rid="B4">2011</xref>) (<xref ref-type="fig" rid="F2">Figures 2d</xref>, <xref ref-type="fig" rid="F2">e</xref>). It is worth noting that class C microstate is associated with mind-wandering, especially self-referential thoughts and processes, while class D microstate is linked to attention (Khanna et al., <xref ref-type="bibr" rid="B29">2015</xref>; Michel and Koenig, <xref ref-type="bibr" rid="B38">2018</xref>; Koenig et al., <xref ref-type="bibr" rid="B32">2002</xref>; Tarailis et al., <xref ref-type="bibr" rid="B62">2023</xref>). This suggests that listening to happy music is linked with heightened attention, and when attention is heightened (class D), mind-wandering (including self-referential thoughts and processes) tends to be diminished during the course of listening to happy music. Additionally, we observed a strong positive correlation between the two microstates for GFP, as shown in <xref ref-type="fig" rid="F2">Figure 2f</xref>. This suggests that, while the electrical activity of both microstates exhibits a positive correlation during the course of listening to happy music, there is a negative correlation between their relative presence. The findings align with previous research involving Raga Darbari, which indicated enhanced attention and decreased interference from unwanted noise during the music listening experience (Gupta et al., <xref ref-type="bibr" rid="B19">2018</xref>).</p>
<p>Furthermore, a comparative microstate analysis was conducted across three conditions: BL, MUS, and PMS. The analysis of GEV revealed a significantly greater presence of the class D microstate in the MUS condition than the other conditions (<xref ref-type="fig" rid="F4">Figure 4c</xref>). Both the MUS and PMS conditions also revealed a significantly reduced presence of class C microstate compared to the BL condition (<xref ref-type="fig" rid="F4">Figure 4b</xref>). The findings show that a happy music listening state is characterized by enhanced attention and diminished mind-wandering. This is further supported by the lower presence of the class B microstate during the MUS condition as compared to BL condition (<xref ref-type="fig" rid="F4">Figure 4a</xref>). Class B microstate has been linked to scenes and self-visualization (Br&#x000E9;chet et al., <xref ref-type="bibr" rid="B6">2019</xref>; Tarailis et al., <xref ref-type="bibr" rid="B62">2023</xref>), thereby indicating that mind-wandering might be reduced upon listening to happy music. However, further studies are needed to ascertain it. The results obtained in our study align with previous research on music (Gupta et al., <xref ref-type="bibr" rid="B19">2018</xref>, <xref ref-type="bibr" rid="B21">2025</xref>). GFP analysis revealed that the music listening condition (MUS) exhibited enhanced electrical activity compared to the baseline condition for the classes A and D microstates (<xref ref-type="fig" rid="F5">Figures 5a</xref>, <xref ref-type="fig" rid="F5">b</xref>). Increased GFP for class D indicates a more activated attention state, consistent with the above findings. On the other hand, the increase in GFP for class A suggests that auditory processing is enhanced during music listening compared to the baseline condition. These results align with a previous study that demonstrated an increased level of music awareness during the listening of happy music (Taruffi et al., <xref ref-type="bibr" rid="B64">2017</xref>).</p>
<p>The second experiment focused on the effect of sad music listening. Participants&#x00027; subjective mood assessments revealed an increased sad state during sad music listening (<xref ref-type="fig" rid="F10">Figure 10a</xref>). Although sadness was experienced in both the SAR state and sad MUS state, the self-regulatory questionnaire indicated a qualitative variation in the nature of this sadness. The questionnaire revealed that the sad musical excerpts facilitated the achievement of various self-regulatory goals, such as re-experiencing past emotions, enhancing mood, and evoking memories (<xref ref-type="fig" rid="F10">Figure 10b</xref>). Additionally, the results indicated that listening to sad music post adverse experience resulted in an overall positive experience (<xref ref-type="fig" rid="F10">Figure 10c</xref>), aligning with findings from previous studies (Van den Tol et al., <xref ref-type="bibr" rid="B69">2016</xref>; Van den Tol and Edwards, <xref ref-type="bibr" rid="B68">2013</xref>; Hanser et al., <xref ref-type="bibr" rid="B23">2016</xref>).</p>
<p>We further obtained four microstates that explained variance (GEV) of 77.95, 77.77, and 76.98 during BL, MUS, and PMS conditions, respectively. These results are depicted in <xref ref-type="fig" rid="F6">Figures 6a</xref>, <xref ref-type="fig" rid="F7">7</xref>. The results revealed that the spatial distribution of these four microstates resembled the classical four microstates (Wang et al., <xref ref-type="bibr" rid="B72">2021</xref>; Hu et al., <xref ref-type="bibr" rid="B24">2023</xref>; Pascual-Marqui et al., <xref ref-type="bibr" rid="B44">2014</xref>; Gu et al., <xref ref-type="bibr" rid="B18">2022</xref>), including earlier microstate studies involving musical stimulus (Hu et al., <xref ref-type="bibr" rid="B24">2023</xref>; Gupta et al., <xref ref-type="bibr" rid="B21">2025</xref>). During the course of sad music listening, analyses of GEV and GFP indicate that class A microstate exhibits significantly lower presence and decreased electrical activity than all other microstates (<xref ref-type="fig" rid="F6">Figures 6b</xref>, <xref ref-type="fig" rid="F6">c</xref>).</p>
<p>Additionally, we conducted a comparative microstate analysis across three conditions: BL, MUS, and PMS. The GEV analysis showed that the presence of class C microstate during the PMS condition was significantly higher than the BL condition, as shown in <xref ref-type="fig" rid="F8">Figure 8a</xref>. Additionally, the presence of a class D microstate was significantly higher during the MUS condition than the BL and PMS states (<xref ref-type="fig" rid="F8">Figure 8b</xref>).</p>
<p>It is worth noting that the phenomenon of mind-wandering during sad music listening differs from ordinary mind-wandering, and is distinguished by its melancholic yet pleasurable nature (Gupta et al., <xref ref-type="bibr" rid="B20">2023</xref>; Taruffi and Koelsch, <xref ref-type="bibr" rid="B63">2014</xref>; Sachs et al., <xref ref-type="bibr" rid="B51">2015</xref>). It involves the emergence of spontaneous, self-referential thoughts, emotions, and cognitive processes (Gupta et al., <xref ref-type="bibr" rid="B20">2023</xref>; Taruffi and Koelsch, <xref ref-type="bibr" rid="B63">2014</xref>; Sachs et al., <xref ref-type="bibr" rid="B51">2015</xref>). This is also in alignment with the results obtained in the subjective assessment. Thus, the enhanced presence of class C and class D microstates as a result of listening to sad music signifies an enhanced process of mind-wandering, especially self-referential and attention, respectively. These findings are consistent with previous studies (Gupta et al., <xref ref-type="bibr" rid="B20">2023</xref>; Van den Tol and Edwards, <xref ref-type="bibr" rid="B68">2013</xref>; Van den Tol et al., <xref ref-type="bibr" rid="B69">2016</xref>).</p>
<p>Furthermore, the increased presence of class B microstate during sad music listening might indicate the involvement of scene and self-visualization with self-referential thoughts and memories during sad music listening. This aligns with earlier research (Br&#x000E9;chet et al., <xref ref-type="bibr" rid="B6">2019</xref>; Gupta et al., <xref ref-type="bibr" rid="B20">2023</xref>; Van den Tol and Edwards, <xref ref-type="bibr" rid="B68">2013</xref>; Van den Tol et al., <xref ref-type="bibr" rid="B69">2016</xref>); however, more investigations are needed to ascertain the fact.</p>
<p>The duration and GFP analysis show that regardless of the microstate class, the mean duration and GFP of microstates were higher during the MUS condition than the BL and PMS states, as shown in <xref ref-type="fig" rid="F8">Figures 8c</xref>, <xref ref-type="fig" rid="F8">d</xref>. This suggests that during sad music listening, there is a tendency for the brain microstates to persist for a longer duration with enhanced electrical activity.</p>
<p>Furthermore, the occurrence analysis showed that the frequency of occurrence of microstates was lower during the MUS state than during the other states, suggesting that the music state had a lower occurrence rate for microstates but with longer duration and larger GFP (<xref ref-type="fig" rid="F9">Figures 9a</xref>&#x02013;<xref ref-type="fig" rid="F9">d</xref>).</p>
<p>Moreover, analyses of GEV and coverage for correlation between class C and class D microstates during the course of listening to sad music reveal a moderate negative correlation. It is important to note that this relationship explains only a small amount of variance in the data, as indicated by low R-squared values (0.3025 and 0.3249), as shown in <xref ref-type="fig" rid="F6">Figures 6d</xref>, <xref ref-type="fig" rid="F6">e</xref>. This suggests that there are other factors and parameters that contribute to the unexplained variance in the data. Future investigations should explore these additional factors and parameters. However, different relationships between class C and class D microstates (for GEV and coverage) during happy and sad music are consistent with earlier studies and likely highlight the differences in the nature of mind-wandering (self-referential) process (class C microstates) between them (Taruffi et al., <xref ref-type="bibr" rid="B64">2017</xref>). We also observed a strong positive correlation between the two microstates (class C and class D) in terms of GFP, as shown in <xref ref-type="fig" rid="F6">Figure 6f</xref>. This suggests that the electrical activity of both microstates is enhanced during the course of listening to sad music and is in line with the happy music analysis.</p>
<p>In summary (<xref ref-type="fig" rid="F11">Figure 11</xref>), the present study underscores the impact of happy and sad music on various mental processes, particularly in modulating brain microstates. The key findings indicate that listening to music leads to longer microstate duration and improved attention. Furthermore, happy music specifically reduces mind-wandering, fostering sustained focus, whereas sad music enhances self-referential processing, aiding in self-regulation during emotionally challenging situations.</p>
<fig id="F11" position="float">
<label>Figure 11</label>
<caption><p>Schematic model illustrating the brain&#x00027;s responses during happy and sad music listening: <bold>(a)</bold> Happy music stimulates attention and reduces mind-wandering (depicted by blue color). <bold>(b)</bold> Sad music stimulates brain regions associated with attention, mind-wandering, particularly self-referential processes (depicted by yellow color).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnhum-19-1472689-g0011.tif"/>
</fig>
<p>This study&#x00027;s findings pave the way for personalized music therapy, cognitive training, and mental health interventions for conditions such as ADHD, depression, and anxiety. Music&#x00027;s impact on attention and self-regulation can enhance workplace productivity, education, and rehabilitation. Additionally, AI-driven adaptive music systems could tailor recommendations based on cognitive states. These insights have broad applications in healthcare, technology, and performance enhancement.</p></sec>
<sec id="s5">
<title>5 Limitations</title>
<p>While this study offers valuable insights, several limitations warrant further exploration. First, it focused on specific music genres, and incorporating a wider range of musical styles could deepen our understanding of music-induced brain dynamics. Additionally, the study was limited to male participants, underscoring the need for future research to examine potential gender differences. Furthermore, it did not consider how varying intensities of happiness and sadness influence microstates, particularly classes C and D. The lack of real-time subjective assessments of attention and mind-wandering also restricts insights into moment-to-moment cognitive fluctuations during music listening.</p>
<p>Future research utilizing a dense montage system with 64&#x0002B; electrodes and source localization analysis could provide a more precise understanding of the neural mechanisms underlying microstate changes, particularly in differentiating self-referential processes during sad music listening. Age can be a crucial factor, as it may influence both neural processing and microstate dynamics. Thus, the current findings need to be validated across different age groups. Furthermore, Studies 1 and 2 were conducted on separate sample groups. Future research using the same sample group for both musical stimuli would allow for a more detailed comparative analysis of microstate features specific to happy and sad music listening. Addressing these gaps will contribute to a more comprehensive understanding of music&#x00027;s effects on brain function and its therapeutic applications.</p></sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec sec-type="ethics-statement" id="s7">
<title>Ethics statement</title>
<p>The studies involving humans were approved by Indian Institute of Technology, Kanpur (IEC Communication no: IITK/IEC/2019&#x02013;20/I/18, IITK/IEC/2017&#x02013;18 I/3). The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec sec-type="author-contributions" id="s8">
<title>Author contributions</title>
<p>AG: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Software, Validation, Visualization, Writing &#x02013; original draft, Writing &#x02013; review &#x00026; editing. CS: Data curation, Formal analysis, Investigation, Writing &#x02013; original draft. BB: Conceptualization, Project administration, Supervision, Visualization, Writing &#x02013; review &#x00026; editing. LB: Conceptualization, Funding acquisition, Project administration, Resources, Supervision, Validation, Visualization, Writing &#x02013; review &#x00026; editing.</p>
</sec>
<sec sec-type="funding-information" id="s9">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research and/or publication of this article. The current work is partly supported by the research grant by the Central Mine Planning &#x00026; Design Institute Limited, Coal India Limited, India (CMPDIL-CIL/LB/511) and Indian Knowledge Systems Division of Ministry of Education, Govt of India (AICTE/IKS/RFPI/2021-22/01).</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec><sec sec-type="supplementary-material" id="s11">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fnhum.2025.1472689/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fnhum.2025.1472689/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.PDF" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Al Zoubi</surname> <given-names>O.</given-names></name> <name><surname>Mayeli</surname> <given-names>A.</given-names></name> <name><surname>Tsuchiyagaito</surname> <given-names>A.</given-names></name> <name><surname>Misaki</surname> <given-names>M.</given-names></name> <name><surname>Zotev</surname> <given-names>V.</given-names></name> <name><surname>Refai</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>EEG microstates temporal dynamics differentiate individuals with mood and anxiety disorders from healthy subjects</article-title>. <source>Front. Hum. Neurosci</source>. <volume>13</volume>:<fpage>56</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2019.00056</pub-id><pub-id pub-id-type="pmid">30863294</pub-id></citation></ref>
<ref id="B2">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Andrews-Hanna</surname> <given-names>J. R.</given-names></name> <name><surname>Reidler</surname> <given-names>J. S.</given-names></name> <name><surname>Huang</surname> <given-names>C.</given-names></name> <name><surname>Buckner</surname> <given-names>R. L.</given-names></name></person-group> (<year>2010a</year>). <article-title>Evidence for the default network&#x00027;s role in spontaneous cognition</article-title>. <source>J. Neurophysiol</source>. <volume>104</volume>, <fpage>322</fpage>&#x02013;<lpage>335</lpage>. <pub-id pub-id-type="doi">10.1152/jn.00830.2009</pub-id><pub-id pub-id-type="pmid">20463201</pub-id></citation></ref>
<ref id="B3">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Andrews-Hanna</surname> <given-names>J. R.</given-names></name> <name><surname>Reidler</surname> <given-names>J. S.</given-names></name> <name><surname>Sepulcre</surname> <given-names>J.</given-names></name> <name><surname>Poulin</surname> <given-names>R.</given-names></name> <name><surname>Buckner</surname> <given-names>R. L.</given-names></name></person-group> (<year>2010b</year>). <article-title>Functional-anatomic fractionation of the brain&#x00027;s default network</article-title>. <source>Neuron</source> <volume>65</volume>, <fpage>550</fpage>&#x02013;<lpage>562</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuron.2010.02.005</pub-id><pub-id pub-id-type="pmid">20188659</pub-id></citation></ref>
<ref id="B4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Braboszcz</surname> <given-names>C.</given-names></name> <name><surname>Delorme</surname> <given-names>A.</given-names></name></person-group> (<year>2011</year>). <article-title>Lost in thoughts: neural markers of low alertness during mind wandering</article-title>. <source>Neuroimage</source> <volume>54</volume>, <fpage>3040</fpage>&#x02013;<lpage>3047</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2010.10.008</pub-id><pub-id pub-id-type="pmid">20946963</pub-id></citation></ref>
<ref id="B5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brattico</surname> <given-names>E.</given-names></name> <name><surname>Alluri</surname> <given-names>V.</given-names></name> <name><surname>Bogert</surname> <given-names>B.</given-names></name> <name><surname>Jacobsen</surname> <given-names>T.</given-names></name> <name><surname>Vartiainen</surname> <given-names>N.</given-names></name> <name><surname>Nieminen</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>A functional mri study of happy and sad emotions in music with and without lyrics</article-title>. <source>Front. Psychol</source>. <volume>2</volume>:<fpage>308</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2011.00308</pub-id><pub-id pub-id-type="pmid">22144968</pub-id></citation></ref>
<ref id="B6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Br&#x000E9;chet</surname> <given-names>L.</given-names></name> <name><surname>Brunet</surname> <given-names>D.</given-names></name> <name><surname>Birot</surname> <given-names>G.</given-names></name> <name><surname>Gruetter</surname> <given-names>R.</given-names></name> <name><surname>Michel</surname> <given-names>C. M.</given-names></name> <name><surname>Jorge</surname> <given-names>J.</given-names></name></person-group> (<year>2019</year>). <article-title>Capturing the spatiotemporal dynamics of self-generated, task-initiated thoughts with EEG and fMRI</article-title>. <source>Neuroimage</source> <volume>194</volume>, <fpage>82</fpage>&#x02013;<lpage>92</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2019.03.029</pub-id><pub-id pub-id-type="pmid">30902640</pub-id></citation></ref>
<ref id="B7">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cantero</surname> <given-names>J. L.</given-names></name> <name><surname>Atienza</surname> <given-names>M.</given-names></name> <name><surname>Salas</surname> <given-names>R. M.</given-names></name> <name><surname>G&#x000F3;mez</surname> <given-names>C. M.</given-names></name></person-group> (<year>1999</year>). <article-title>Brain spatial microstates of human spontaneous alpha activity in relaxed wakefulness, drowsiness period, and rem sleep</article-title>. <source>Brain Topogr</source>. <volume>11</volume>, <fpage>257</fpage>&#x02013;<lpage>263</lpage>. <pub-id pub-id-type="doi">10.1023/A:1022213302688</pub-id><pub-id pub-id-type="pmid">10449257</pub-id></citation></ref>
<ref id="B8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chaumon</surname> <given-names>M.</given-names></name> <name><surname>Bishop</surname> <given-names>D. V.</given-names></name> <name><surname>Busch</surname> <given-names>N. A.</given-names></name></person-group> (<year>2015</year>). <article-title>A practical guide to the selection of independent components of the electroencephalogram for artifact correction</article-title>. <source>J. Neurosci. Methods</source> <volume>250</volume>, <fpage>47</fpage>&#x02013;<lpage>63</lpage>. <pub-id pub-id-type="doi">10.1016/j.jneumeth.2015.02.025</pub-id><pub-id pub-id-type="pmid">25791012</pub-id></citation></ref>
<ref id="B9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>J.</given-names></name> <name><surname>Li</surname> <given-names>H.</given-names></name> <name><surname>Ma</surname> <given-names>L.</given-names></name> <name><surname>Bo</surname> <given-names>H.</given-names></name> <name><surname>Soong</surname> <given-names>F.</given-names></name> <name><surname>Shi</surname> <given-names>Y.</given-names></name></person-group> (<year>2021</year>). <article-title>Dual-threshold-based microstate analysis on characterizing temporal dynamics of affective process and emotion recognition from EEG signals</article-title>. <source>Front. Neurosci</source>. <volume>15</volume>:<fpage>689791</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2021.689791</pub-id><pub-id pub-id-type="pmid">34335165</pub-id></citation></ref>
<ref id="B10">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Christoff</surname> <given-names>K.</given-names></name> <name><surname>Gordon</surname> <given-names>A. M.</given-names></name> <name><surname>Smallwood</surname> <given-names>J.</given-names></name> <name><surname>Smith</surname> <given-names>R.</given-names></name> <name><surname>Schooler</surname> <given-names>J. W.</given-names></name></person-group> (<year>2009</year>). <article-title>Experience sampling during fMRI reveals default network and executive system contributions to mind wandering</article-title>. <source>Proc. Nat. Acad. Sci</source>. <volume>106</volume>, <fpage>8719</fpage>&#x02013;<lpage>8724</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.0900234106</pub-id><pub-id pub-id-type="pmid">19433790</pub-id></citation></ref>
<ref id="B11">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Coll</surname> <given-names>S. Y.</given-names></name> <name><surname>Vuichoud</surname> <given-names>N.</given-names></name> <name><surname>Grandjean</surname> <given-names>D.</given-names></name> <name><surname>James</surname> <given-names>C. E.</given-names></name></person-group> (<year>2019</year>). <article-title>Electrical neuroimaging of music processing in pianists with and without true absolute pitch</article-title>. <source>Front. Neurosci</source>. <volume>13</volume>:<fpage>142</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2019.00142</pub-id><pub-id pub-id-type="pmid">30967751</pub-id></citation></ref>
<ref id="B12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Crespo-Garcia</surname> <given-names>M.</given-names></name> <name><surname>Atienza</surname> <given-names>M.</given-names></name> <name><surname>Cantero</surname> <given-names>J. L.</given-names></name></person-group> (<year>2008</year>). <article-title>Muscle artifact removal from human sleep EEG by using independent component analysis</article-title>. <source>Ann. Biomed. Eng</source>. <volume>36</volume>, <fpage>467</fpage>&#x02013;<lpage>475</lpage>. <pub-id pub-id-type="doi">10.1007/s10439-008-9442-y</pub-id><pub-id pub-id-type="pmid">18228142</pub-id></citation></ref>
<ref id="B13">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Custo</surname> <given-names>A.</given-names></name> <name><surname>Van De Ville</surname> <given-names>D.</given-names></name> <name><surname>Wells</surname> <given-names>W. M.</given-names></name> <name><surname>Tomescu</surname> <given-names>M. I.</given-names></name> <name><surname>Brunet</surname> <given-names>D.</given-names></name> <name><surname>Michel</surname> <given-names>C. M.</given-names></name></person-group> (<year>2017</year>). <article-title>Electroencephalographic resting-state networks: source localization of microstates</article-title>. <source>Brain Connect</source>. <volume>7</volume>, <fpage>671</fpage>&#x02013;<lpage>682</lpage>. <pub-id pub-id-type="doi">10.1089/brain.2016.0476</pub-id><pub-id pub-id-type="pmid">28938855</pub-id></citation></ref>
<ref id="B14">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Delorme</surname> <given-names>A.</given-names></name> <name><surname>Makeig</surname> <given-names>S.</given-names></name></person-group> (<year>2004</year>). <article-title>EEGLAB: an open source toolbox for analysis of single-trial EEG dynamics including independent component analysis</article-title>. <source>J. Neurosci. Methods</source> <volume>134</volume>, <fpage>9</fpage>&#x02013;<lpage>21</lpage>. <pub-id pub-id-type="doi">10.1016/j.jneumeth.2003.10.009</pub-id><pub-id pub-id-type="pmid">15102499</pub-id></citation></ref>
<ref id="B15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>F&#x000E9;rat</surname> <given-names>V.</given-names></name> <name><surname>Seeber</surname> <given-names>M.</given-names></name> <name><surname>Michel</surname> <given-names>C. M.</given-names></name> <name><surname>Ros</surname> <given-names>T.</given-names></name></person-group> (<year>2022</year>). <article-title>Beyond broadband: towards a spectral decomposition of electroencephalography microstates</article-title>. <source>Hum. Brain Mapp</source>. <volume>43</volume>, <fpage>3047</fpage>&#x02013;<lpage>3061</lpage>. <pub-id pub-id-type="doi">10.1002/hbm.25834</pub-id><pub-id pub-id-type="pmid">35324021</pub-id></citation></ref>
<ref id="B16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ford</surname> <given-names>J. H.</given-names></name> <name><surname>Addis</surname> <given-names>D. R.</given-names></name> <name><surname>Giovanello</surname> <given-names>K. S.</given-names></name></person-group> (<year>2011</year>). <article-title>Differential neural activity during search of specific and general autobiographical memories elicited by musical cues</article-title>. <source>Neuropsychologia</source> <volume>49</volume>, <fpage>2514</fpage>&#x02013;<lpage>2526</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuropsychologia.2011.04.032</pub-id><pub-id pub-id-type="pmid">21600227</pub-id></citation></ref>
<ref id="B17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Goshvarpour</surname> <given-names>A.</given-names></name> <name><surname>Goshvarpour</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>EEG spectral powers and source localization in depressing, sad, and fun music videos focusing on gender differences</article-title>. <source>Cogn. Neurodyn</source>. <volume>13</volume>, <fpage>161</fpage>&#x02013;<lpage>173</lpage>. <pub-id pub-id-type="doi">10.1007/s11571-018-9516-y</pub-id><pub-id pub-id-type="pmid">30956720</pub-id></citation></ref>
<ref id="B18">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gu</surname> <given-names>F.</given-names></name> <name><surname>Gong</surname> <given-names>A.</given-names></name> <name><surname>Qu</surname> <given-names>Y.</given-names></name> <name><surname>Xiao</surname> <given-names>H.</given-names></name> <name><surname>Wu</surname> <given-names>J.</given-names></name> <name><surname>Nan</surname> <given-names>W.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Research on top archer&#x00027;s EEG microstates and source analysis in different states</article-title>. <source>Brain Sci</source>. <volume>12</volume>:<fpage>1017</fpage>. <pub-id pub-id-type="doi">10.3390/brainsci12081017</pub-id><pub-id pub-id-type="pmid">36009079</pub-id></citation></ref>
<ref id="B19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gupta</surname> <given-names>A.</given-names></name> <name><surname>Bhushan</surname> <given-names>B.</given-names></name> <name><surname>Behera</surname> <given-names>L.</given-names></name></person-group> (<year>2018</year>). <article-title>Short-term enhancement of cognitive functions and music: a three-channel model</article-title>. <source>Sci. Rep</source>. <volume>8</volume>, <fpage>1</fpage>&#x02013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1038/s41598-018-33618-1</pub-id><pub-id pub-id-type="pmid">30341361</pub-id></citation></ref>
<ref id="B20">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gupta</surname> <given-names>A.</given-names></name> <name><surname>Bhushan</surname> <given-names>B.</given-names></name> <name><surname>Behera</surname> <given-names>L.</given-names></name></person-group> (<year>2023</year>). <article-title>Neural response to sad autobiographical recall and sad music listening post recall reveals distinct brain activation in alpha and gamma bands</article-title>. <source>PLoS ONE</source> <volume>18</volume>:<fpage>e0279814</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0279814</pub-id><pub-id pub-id-type="pmid">36607985</pub-id></citation></ref>
<ref id="B21">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gupta</surname> <given-names>A.</given-names></name> <name><surname>Srivastava</surname> <given-names>C. K.</given-names></name> <name><surname>Bhushan</surname> <given-names>B.</given-names></name> <name><surname>Behera</surname> <given-names>L.</given-names></name></person-group> (<year>2025</year>). <article-title>A comparative study of EEG microstate dynamics during happy and sad music videos</article-title>. <source>Front. Hum. Neurosci</source>. <volume>18</volume>:<fpage>1469468</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2024.1469468</pub-id><pub-id pub-id-type="pmid">39980907</pub-id></citation></ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gupta</surname> <given-names>U.</given-names></name> <name><surname>Gupta</surname> <given-names>B.</given-names></name></person-group> (<year>2016</year>). <article-title>Gender differences in psychophysiological responses to music listening</article-title>. <source>Music Med</source>. <volume>8</volume>, <fpage>53</fpage>&#x02013;<lpage>64</lpage>. <pub-id pub-id-type="doi">10.47513/mmd.v8i1.471</pub-id></citation>
</ref>
<ref id="B23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hanser</surname> <given-names>W. E.</given-names></name> <name><surname>ter Bogt</surname> <given-names>T. F.</given-names></name> <name><surname>Van den Tol</surname> <given-names>A. J.</given-names></name> <name><surname>Mark</surname> <given-names>R. E.</given-names></name> <name><surname>Vingerhoets</surname> <given-names>A. J.</given-names></name></person-group> (<year>2016</year>). <article-title>Consolation through music: a survey study</article-title>. <source>Musicae Sci</source>. <volume>20</volume>, <fpage>122</fpage>&#x02013;<lpage>137</lpage>. <pub-id pub-id-type="doi">10.1177/1029864915620264</pub-id></citation>
</ref>
<ref id="B24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hu</surname> <given-names>W.</given-names></name> <name><surname>Zhang</surname> <given-names>Z.</given-names></name> <name><surname>Zhao</surname> <given-names>H.</given-names></name> <name><surname>Zhang</surname> <given-names>L.</given-names></name> <name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Huang</surname> <given-names>G.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>EEG microstate correlates of emotion dynamics and stimulation content during video watching</article-title>. <source>Cerebral Cortex</source> <volume>33</volume>, <fpage>523</fpage>&#x02013;<lpage>542</lpage>. <pub-id pub-id-type="doi">10.1093/cercor/bhac082</pub-id><pub-id pub-id-type="pmid">35262653</pub-id></citation></ref>
<ref id="B25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Janata</surname> <given-names>P.</given-names></name></person-group> (<year>2009</year>). <article-title>The neural architecture of music-evoked autobiographical memories</article-title>. <source>Cerebral Cortex</source> <volume>19</volume>, <fpage>2579</fpage>&#x02013;<lpage>2594</lpage>. <pub-id pub-id-type="doi">10.1093/cercor/bhp008</pub-id><pub-id pub-id-type="pmid">19240137</pub-id></citation></ref>
<ref id="B26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>J&#x000E4;ncke</surname> <given-names>L.</given-names></name> <name><surname>K&#x000FC;hnis</surname> <given-names>J.</given-names></name> <name><surname>Rogenmoser</surname> <given-names>L.</given-names></name> <name><surname>Elmer</surname> <given-names>S.</given-names></name></person-group> (<year>2015</year>). <article-title>Time course of EEG oscillations during repeated listening of a well-known aria</article-title>. <source>Front. Hum. Neurosci</source>. <volume>9</volume>:<fpage>401</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2015.00401</pub-id><pub-id pub-id-type="pmid">26257624</pub-id></citation></ref>
<ref id="B27">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jiang</surname> <given-names>H.</given-names></name> <name><surname>Zhao</surname> <given-names>S.</given-names></name> <name><surname>Wu</surname> <given-names>Q.</given-names></name> <name><surname>Cao</surname> <given-names>Y.</given-names></name> <name><surname>Zhou</surname> <given-names>W.</given-names></name> <name><surname>Gong</surname> <given-names>Y.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Dragon boat exercise reshapes the temporal-spatial dynamics of the brain</article-title>. <source>PeerJ</source> <volume>12</volume>:<fpage>e17623</fpage>. <pub-id pub-id-type="doi">10.7717/peerj.17623</pub-id><pub-id pub-id-type="pmid">38952974</pub-id></citation></ref>
<ref id="B28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kar</surname> <given-names>S.</given-names></name> <name><surname>Ganguly</surname> <given-names>T.</given-names></name> <name><surname>Roy</surname> <given-names>S.</given-names></name> <name><surname>Goswami</surname> <given-names>A.</given-names></name></person-group> (<year>2015</year>). <article-title>Effect of Indian classical music (raga therapy) on fentanyl, vecuronium, propofol requirements and cortisol levels in cardiopulmonary bypass</article-title>. <source>J. Anesth. Crit. Care Open Access</source> <volume>2</volume>:<fpage>00047</fpage>. <pub-id pub-id-type="doi">10.15406/jaccoa.2015.02.00047</pub-id></citation>
</ref>
<ref id="B29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Khanna</surname> <given-names>A.</given-names></name> <name><surname>Pascual-Leone</surname> <given-names>A.</given-names></name> <name><surname>Michel</surname> <given-names>C. M.</given-names></name> <name><surname>Farzan</surname> <given-names>F.</given-names></name></person-group> (<year>2015</year>). <article-title>Microstates in resting-state EEG: current status and future directions</article-title>. <source>Neurosci. Biobehav. Rev</source>. <volume>49</volume>, <fpage>105</fpage>&#x02013;<lpage>113</lpage>. <pub-id pub-id-type="doi">10.1016/j.neubiorev.2014.12.010</pub-id><pub-id pub-id-type="pmid">25526823</pub-id></citation></ref>
<ref id="B30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>K.</given-names></name> <name><surname>Duc</surname> <given-names>N.</given-names></name> <name><surname>Choi</surname> <given-names>M.</given-names></name> <name><surname>Lee</surname> <given-names>B.</given-names></name></person-group> (<year>2021</year>). <article-title>EEG microstate features according to performance on a mental arithmetic task</article-title>. <source>Sci. Rep</source>. <volume>11</volume>, <fpage>1</fpage>&#x02013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1038/s41598-020-79423-7</pub-id><pub-id pub-id-type="pmid">33431963</pub-id></citation></ref>
<ref id="B31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Koenig</surname> <given-names>T.</given-names></name> <name><surname>Lehmann</surname> <given-names>D.</given-names></name> <name><surname>Merlo</surname> <given-names>M. C.</given-names></name> <name><surname>Kochi</surname> <given-names>K.</given-names></name> <name><surname>Hell</surname> <given-names>D.</given-names></name> <name><surname>Koukkou</surname> <given-names>M.</given-names></name></person-group> (<year>1999</year>). <article-title>A deviant EEG brain microstate in acute, neuroleptic-naive schizophrenics at rest</article-title>. <source>Eur. Arch. Psychiatry Clin. Neurosci</source>. <volume>249</volume>, <fpage>205</fpage>&#x02013;<lpage>211</lpage>. <pub-id pub-id-type="doi">10.1007/s004060050088</pub-id><pub-id pub-id-type="pmid">10449596</pub-id></citation></ref>
<ref id="B32">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Koenig</surname> <given-names>T.</given-names></name> <name><surname>Prichep</surname> <given-names>L.</given-names></name> <name><surname>Lehmann</surname> <given-names>D.</given-names></name> <name><surname>Sosa</surname> <given-names>P. V.</given-names></name> <name><surname>Braeker</surname> <given-names>E.</given-names></name> <name><surname>Kleinlogel</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2002</year>). <article-title>Millisecond by millisecond, year by year: normative EEG microstates and developmental stages</article-title>. <source>Neuroimage</source> <volume>16</volume>, <fpage>41</fpage>&#x02013;<lpage>48</lpage>. <pub-id pub-id-type="doi">10.1006/nimg.2002.1070</pub-id><pub-id pub-id-type="pmid">11969316</pub-id></citation></ref>
<ref id="B33">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kucyi</surname> <given-names>A.</given-names></name> <name><surname>Salomons</surname> <given-names>T. V.</given-names></name> <name><surname>Davis</surname> <given-names>K. D.</given-names></name></person-group> (<year>2013</year>). <article-title>Mind wandering away from pain dynamically engages antinociceptive and default mode brain networks</article-title>. <source>Proc. Nat. Acad. Sci</source>. <volume>110</volume>, <fpage>18692</fpage>&#x02013;<lpage>18697</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.1312902110</pub-id><pub-id pub-id-type="pmid">24167282</pub-id></citation></ref>
<ref id="B34">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>H.</given-names></name> <name><surname>Tang</surname> <given-names>H.</given-names></name> <name><surname>Wei</surname> <given-names>W.</given-names></name> <name><surname>Wang</surname> <given-names>G.</given-names></name> <name><surname>Du</surname> <given-names>Y.</given-names></name> <name><surname>Ruan</surname> <given-names>J.</given-names></name></person-group> (<year>2021</year>). <article-title>Altered peri-seizure EEG microstate dynamics in patients with absence epilepsy</article-title>. <source>Seizure</source> <volume>88</volume>, <fpage>15</fpage>&#x02013;<lpage>21</lpage>. <pub-id pub-id-type="doi">10.1016/j.seizure.2021.03.020</pub-id><pub-id pub-id-type="pmid">33799135</pub-id></citation></ref>
<ref id="B35">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mammarella</surname> <given-names>N.</given-names></name> <name><surname>Fairfield</surname> <given-names>B.</given-names></name> <name><surname>Cornoldi</surname> <given-names>C.</given-names></name></person-group> (<year>2007</year>). <article-title>Does music enhance cognitive performance in healthy older adults? The Vivaldi effect</article-title>. <source>Aging Clin. Exp. Res</source>. <volume>19</volume>, <fpage>394</fpage>&#x02013;<lpage>399</lpage>. <pub-id pub-id-type="doi">10.1007/BF03324720</pub-id><pub-id pub-id-type="pmid">18007118</pub-id></citation></ref>
<ref id="B36">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Markovic</surname> <given-names>A.</given-names></name> <name><surname>K&#x000FC;hnis</surname> <given-names>J.</given-names></name> <name><surname>J&#x000E4;ncke</surname> <given-names>L.</given-names></name></person-group> (<year>2017</year>). <article-title>Task context influences brain activation during music listening</article-title>. <source>Front. Hum. Neurosci</source>. <volume>11</volume>:<fpage>342</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2017.00342</pub-id><pub-id pub-id-type="pmid">28706480</pub-id></citation></ref>
<ref id="B37">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Mason</surname> <given-names>M. F.</given-names></name> <name><surname>Norton</surname> <given-names>M. I.</given-names></name> <name><surname>Van Horn</surname> <given-names>J. D.</given-names></name> <name><surname>Wegner</surname> <given-names>D. M.</given-names></name> <name><surname>Grafton</surname> <given-names>S. T.</given-names></name> <name><surname>Macrae</surname> <given-names>C. N.</given-names></name></person-group> (<year>2007</year>). <article-title>Wandering minds: the default network and stimulus-independent thought</article-title>. <source>Science</source> <volume>315</volume>, <fpage>393</fpage>&#x02013;<lpage>395</lpage>. <pub-id pub-id-type="doi">10.1126/science.1131295</pub-id><pub-id pub-id-type="pmid">17234951</pub-id></citation></ref>
<ref id="B38">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Michel</surname> <given-names>C. M.</given-names></name> <name><surname>Koenig</surname> <given-names>T.</given-names></name></person-group> (<year>2018</year>). <article-title>EEG microstates as a tool for studying the temporal dynamics of whole-brain neuronal networks: a review</article-title>. <source>Neuroimage</source> <volume>180</volume>, <fpage>577</fpage>&#x02013;<lpage>593</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2017.11.062</pub-id><pub-id pub-id-type="pmid">29196270</pub-id></citation></ref>
<ref id="B39">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Milz</surname> <given-names>P.</given-names></name> <name><surname>Pascual-Marqui</surname> <given-names>R. D.</given-names></name> <name><surname>Achermann</surname> <given-names>P.</given-names></name> <name><surname>Kochi</surname> <given-names>K.</given-names></name> <name><surname>Faber</surname> <given-names>P. L.</given-names></name></person-group> (<year>2017</year>). <article-title>The EEG microstate topography is predominantly determined by intracortical sources in the alpha band</article-title>. <source>Neuroimage</source> <volume>162</volume>, <fpage>353</fpage>&#x02013;<lpage>361</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2017.08.058</pub-id><pub-id pub-id-type="pmid">28847493</pub-id></citation></ref>
<ref id="B40">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Murray</surname> <given-names>M. M.</given-names></name> <name><surname>Brunet</surname> <given-names>D.</given-names></name> <name><surname>Michel</surname> <given-names>C. M.</given-names></name></person-group> (<year>2008</year>). <article-title>Topographic ERP analyses: a step-by-step tutorial review</article-title>. <source>Brain Topogr</source>. <volume>20</volume>, <fpage>249</fpage>&#x02013;<lpage>264</lpage>. <pub-id pub-id-type="doi">10.1007/s10548-008-0054-5</pub-id><pub-id pub-id-type="pmid">18347966</pub-id></citation></ref>
<ref id="B41">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Neubauer</surname> <given-names>A. C.</given-names></name> <name><surname>Fink</surname> <given-names>A.</given-names></name></person-group> (<year>2009</year>). <article-title>Intelligence and neural efficiency</article-title>. <source>Neuroscience &#x00026; Biobehav. Rev</source>. <volume>33</volume>, <fpage>1004</fpage>&#x02013;<lpage>1023</lpage>. <pub-id pub-id-type="doi">10.1016/j.neubiorev.2009.04.001</pub-id><pub-id pub-id-type="pmid">19580915</pub-id></citation></ref>
<ref id="B42">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nishida</surname> <given-names>K.</given-names></name> <name><surname>Morishima</surname> <given-names>Y.</given-names></name> <name><surname>Yoshimura</surname> <given-names>M.</given-names></name> <name><surname>Isotani</surname> <given-names>T.</given-names></name> <name><surname>Irisawa</surname> <given-names>S.</given-names></name> <name><surname>Jann</surname> <given-names>K.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>EEG microstates associated with salience and frontoparietal networks in frontotemporal dementia, schizophrenia and Alzheimer&#x00027;s disease</article-title>. <source>Clin. Neurophysiol</source>. <volume>124</volume>, <fpage>1106</fpage>&#x02013;<lpage>1114</lpage>. <pub-id pub-id-type="doi">10.1016/j.clinph.2013.01.005</pub-id><pub-id pub-id-type="pmid">23403263</pub-id></citation></ref>
<ref id="B43">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pal</surname> <given-names>A.</given-names></name> <name><surname>Behari</surname> <given-names>M.</given-names></name> <name><surname>Goyal</surname> <given-names>V.</given-names></name> <name><surname>Sharma</surname> <given-names>R.</given-names></name></person-group> (<year>2021</year>). <article-title>Study of EEG microstates in Parkinson&#x00027;s disease: a potential biomarker?</article-title> <source>Cogn. Neurodyn</source>. <volume>15</volume>, <fpage>463</fpage>&#x02013;<lpage>471</lpage>. <pub-id pub-id-type="doi">10.1007/s11571-020-09643-0</pub-id><pub-id pub-id-type="pmid">34040672</pub-id></citation></ref>
<ref id="B44">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pascual-Marqui</surname> <given-names>R. D.</given-names></name> <name><surname>Lehmann</surname> <given-names>D.</given-names></name> <name><surname>Faber</surname> <given-names>P.</given-names></name> <name><surname>Milz</surname> <given-names>P.</given-names></name> <name><surname>Kochi</surname> <given-names>K.</given-names></name> <name><surname>Yoshimura</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>The resting microstate networks (RMN): cortical distributions, dynamics, and frequency specific information flow</article-title>. <source>arXiv preprint arXiv:1411.1949</source>.</citation>
</ref>
<ref id="B45">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Poulsen</surname> <given-names>A. T.</given-names></name> <name><surname>Pedroni</surname> <given-names>A.</given-names></name> <name><surname>Langer</surname> <given-names>N.</given-names></name> <name><surname>Hansen</surname> <given-names>L. K.</given-names></name></person-group> (<year>2018</year>). <article-title>Microstate EEGlab toolbox: an introductory guide</article-title>. <source>BioRxiv, 289850</source>. <pub-id pub-id-type="doi">10.1101/289850</pub-id></citation>
</ref>
<ref id="B46">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Prete</surname> <given-names>G.</given-names></name> <name><surname>Croce</surname> <given-names>P.</given-names></name> <name><surname>Zappasodi</surname> <given-names>F.</given-names></name> <name><surname>Tommasi</surname> <given-names>L.</given-names></name> <name><surname>Capotosto</surname> <given-names>P.</given-names></name></person-group> (<year>2022</year>). <article-title>Exploring brain activity for positive and negative emotions by means of EEG microstates</article-title>. <source>Sci. Rep</source>. <volume>12</volume>:<fpage>3404</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-022-07403-0</pub-id><pub-id pub-id-type="pmid">35233057</pub-id></citation></ref>
<ref id="B47">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Putkinen</surname> <given-names>V.</given-names></name> <name><surname>Makkonen</surname> <given-names>T.</given-names></name> <name><surname>Eerola</surname> <given-names>T.</given-names></name></person-group> (<year>2017</year>). <article-title>Music-induced positive mood broadens the scope of auditory attention</article-title>. <source>Soc. Cogn. Affect. Neurosci</source>. <volume>12</volume>, <fpage>1159</fpage>&#x02013;<lpage>1168</lpage>. <pub-id pub-id-type="doi">10.1093/scan/nsx038</pub-id><pub-id pub-id-type="pmid">28460035</pub-id></citation></ref>
<ref id="B48">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rauscher</surname> <given-names>F. H.</given-names></name> <name><surname>Shaw</surname> <given-names>G. L.</given-names></name> <name><surname>Ky</surname> <given-names>C. N.</given-names></name></person-group> (<year>1993</year>). <article-title>Music and spatial task performance</article-title>. <source>Nature</source> <volume>365</volume>, <fpage>611</fpage>&#x02013;<lpage>611</lpage>. <pub-id pub-id-type="doi">10.1038/365611a0</pub-id><pub-id pub-id-type="pmid">8413624</pub-id></citation></ref>
<ref id="B49">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rauscher</surname> <given-names>F. H.</given-names></name> <name><surname>Shaw</surname> <given-names>G. L.</given-names></name> <name><surname>Ky</surname> <given-names>K. N.</given-names></name></person-group> (<year>1995</year>). <article-title>Listening to Mozart enhances spatial-temporal reasoning: towards a neurophysiological basis</article-title>. <source>Neurosci. Lett</source>. <volume>185</volume>, <fpage>44</fpage>&#x02013;<lpage>47</lpage>. <pub-id pub-id-type="doi">10.1016/0304-3940(94)11221-4</pub-id><pub-id pub-id-type="pmid">7731551</pub-id></citation></ref>
<ref id="B50">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rideout</surname> <given-names>B. E.</given-names></name> <name><surname>Laubach</surname> <given-names>C. M.</given-names></name></person-group> (<year>1996</year>). <article-title>EEG correlates of enhanced spatial performance following exposure to music</article-title>. <source>Percept. Mot. Skills</source> <volume>82</volume>, <fpage>427</fpage>&#x02013;<lpage>432</lpage>. <pub-id pub-id-type="doi">10.2466/pms.1996.82.2.427</pub-id><pub-id pub-id-type="pmid">8724912</pub-id></citation></ref>
<ref id="B51">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sachs</surname> <given-names>M. E.</given-names></name> <name><surname>Damasio</surname> <given-names>A.</given-names></name> <name><surname>Habibi</surname> <given-names>A.</given-names></name></person-group> (<year>2015</year>). <article-title>The pleasures of sad music: a systematic review</article-title>. <source>Front. Hum. Neurosci</source>. <volume>9</volume>:<fpage>404</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2015.00404</pub-id><pub-id pub-id-type="pmid">26257625</pub-id></citation></ref>
<ref id="B52">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>S&#x000E4;rk&#x000E4;m&#x000F6;</surname> <given-names>T.</given-names></name></person-group> (<year>2018</year>). <article-title>Cognitive, emotional, and neural benefits of musical leisure activities in aging and neurological rehabilitation: a critical review</article-title>. <source>Ann. Phys. Rehabil. Med</source>. <volume>61</volume>, <fpage>414</fpage>&#x02013;<lpage>418</lpage>. <pub-id pub-id-type="doi">10.1016/j.rehab.2017.03.006</pub-id><pub-id pub-id-type="pmid">28461128</pub-id></citation></ref>
<ref id="B53">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schellenberg</surname> <given-names>E. G.</given-names></name> <name><surname>Hallam</surname> <given-names>S.</given-names></name></person-group> (<year>2005</year>). <article-title>Music listening and cognitive abilities in 10- and 11-year-olds: the Blur effect</article-title>. <source>Ann. N. Y. Acad. Sci</source>. <volume>1060</volume>, <fpage>202</fpage>&#x02013;<lpage>209</lpage>. <pub-id pub-id-type="doi">10.1196/annals.1360.013</pub-id><pub-id pub-id-type="pmid">16597767</pub-id></citation></ref>
<ref id="B54">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schellenberg</surname> <given-names>E. G.</given-names></name> <name><surname>Nakata</surname> <given-names>T.</given-names></name> <name><surname>Hunter</surname> <given-names>P. G.</given-names></name> <name><surname>Tamoto</surname> <given-names>S.</given-names></name></person-group> (<year>2007</year>). <article-title>Exposure to music and cognitive performance</article-title>. <source>Psychol. Music</source> <volume>35</volume>, <fpage>5</fpage>&#x02013;<lpage>19</lpage>. <pub-id pub-id-type="doi">10.1177/0305735607068885</pub-id></citation>
</ref>
<ref id="B55">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schiller</surname> <given-names>B.</given-names></name> <name><surname>Kleinert</surname> <given-names>T.</given-names></name> <name><surname>Teige-Mocigemba</surname> <given-names>S.</given-names></name> <name><surname>Klauer</surname> <given-names>K. C.</given-names></name> <name><surname>Heinrichs</surname> <given-names>M.</given-names></name></person-group> (<year>2020</year>). <article-title>Temporal dynamics of resting EEG networks are associated with prosociality</article-title>. <source>Sci. Rep</source>. <volume>10</volume>:<fpage>13066</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-020-69999-5</pub-id><pub-id pub-id-type="pmid">32747655</pub-id></citation></ref>
<ref id="B56">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Seitzman</surname> <given-names>B. A.</given-names></name> <name><surname>Abell</surname> <given-names>M.</given-names></name> <name><surname>Bartley</surname> <given-names>S. C.</given-names></name> <name><surname>Erickson</surname> <given-names>M. A.</given-names></name> <name><surname>Bolbecker</surname> <given-names>A. R.</given-names></name> <name><surname>Hetrick</surname> <given-names>W. P.</given-names></name></person-group> (<year>2017</year>). <article-title>Cognitive manipulation of brain electric microstates</article-title>. <source>Neuroimage</source> <volume>146</volume>, <fpage>533</fpage>&#x02013;<lpage>543</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2016.10.002</pub-id><pub-id pub-id-type="pmid">27742598</pub-id></citation></ref>
<ref id="B57">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Shen</surname> <given-names>X.</given-names></name> <name><surname>Hu</surname> <given-names>X.</given-names></name> <name><surname>Liu</surname> <given-names>S.</given-names></name> <name><surname>Song</surname> <given-names>S.</given-names></name> <name><surname>Zhang</surname> <given-names>D.</given-names></name></person-group> (<year>2020</year>). <article-title>&#x0201C;Exploring EEG microstates for affective computing: decoding valence and arousal experiences during video watching,&#x0201D;</article-title> in <source>2020 42nd Annual International Conference of the IEEE Engineering in Medicine &#x00026; Biology Society (EMBC)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>841</fpage>&#x02013;<lpage>846</lpage>. <pub-id pub-id-type="doi">10.1109/EMBC44109.2020.9175482</pub-id><pub-id pub-id-type="pmid">33018116</pub-id></citation></ref>
<ref id="B58">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Siritunga</surname> <given-names>S.</given-names></name> <name><surname>Wijewardena</surname> <given-names>K.</given-names></name> <name><surname>Ekanayaka</surname> <given-names>R.</given-names></name> <name><surname>Mudunkotuwa</surname> <given-names>P.</given-names></name></person-group> (<year>2013</year>). <article-title>Effect of music on blood pressure, pulse rate and respiratory rate of asymptomatic individuals: a randomized controlled trial</article-title>. <source>Health</source> <volume>5</volume>, <fpage>59</fpage>&#x02013;<lpage>64</lpage>. <pub-id pub-id-type="doi">10.4236/health.2013.54A008</pub-id></citation>
</ref>
<ref id="B59">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Skrandies</surname> <given-names>W.</given-names></name></person-group> (<year>1990</year>). <article-title>Global field power and topographic similarity</article-title>. <source>Brain Topogr</source>. <volume>3</volume>, <fpage>137</fpage>&#x02013;<lpage>141</lpage>. <pub-id pub-id-type="doi">10.1007/BF01128870</pub-id><pub-id pub-id-type="pmid">2094301</pub-id></citation></ref>
<ref id="B60">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Soni</surname> <given-names>S.</given-names></name> <name><surname>Muthukrishnan</surname> <given-names>S. P.</given-names></name> <name><surname>Samanchi</surname> <given-names>R.</given-names></name> <name><surname>Sood</surname> <given-names>M.</given-names></name> <name><surname>Kaur</surname> <given-names>S.</given-names></name> <name><surname>Sharma</surname> <given-names>R.</given-names></name></person-group> (<year>2019</year>). <article-title>Pre-trial and pre-response EEG microstates in schizophrenia: an endophenotypic marker</article-title>. <source>Behav. Brain Res</source>. <volume>371</volume>:<fpage>111964</fpage>. <pub-id pub-id-type="doi">10.1016/j.bbr.2019.111964</pub-id><pub-id pub-id-type="pmid">31129232</pub-id></citation></ref>
<ref id="B61">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tait</surname> <given-names>L.</given-names></name> <name><surname>Tamagnini</surname> <given-names>F.</given-names></name> <name><surname>Stothart</surname> <given-names>G.</given-names></name> <name><surname>Barvas</surname> <given-names>E.</given-names></name> <name><surname>Monaldini</surname> <given-names>C.</given-names></name> <name><surname>Frusciante</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>EEG microstate complexity for aiding early diagnosis of Alzheimer&#x00027;s disease</article-title>. <source>Sci. Rep</source>. <volume>10</volume>, <fpage>1</fpage>&#x02013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1038/s41598-020-74790-7</pub-id><pub-id pub-id-type="pmid">33077823</pub-id></citation></ref>
<ref id="B62">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tarailis</surname> <given-names>P.</given-names></name> <name><surname>Koenig</surname> <given-names>T.</given-names></name> <name><surname>Michel</surname> <given-names>C. M.</given-names></name> <name><surname>Gri&#x00161;kova-Bulanova</surname> <given-names>I.</given-names></name></person-group> (<year>2023</year>). <article-title>The functional aspects of resting EEG microstates: a systematic review</article-title>. <source>Brain Topogr</source>. <volume>37</volume>, <fpage>181</fpage>&#x02013;<lpage>217</lpage>. <pub-id pub-id-type="doi">10.1007/s10548-023-00958-9</pub-id><pub-id pub-id-type="pmid">37162601</pub-id></citation></ref>
<ref id="B63">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Taruffi</surname> <given-names>L.</given-names></name> <name><surname>Koelsch</surname> <given-names>S.</given-names></name></person-group> (<year>2014</year>). <article-title>The paradox of music-evoked sadness: an online survey</article-title>. <source>PLoS ONE</source> <volume>9</volume>:<fpage>e110490</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0110490</pub-id><pub-id pub-id-type="pmid">25330315</pub-id></citation></ref>
<ref id="B64">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Taruffi</surname> <given-names>L.</given-names></name> <name><surname>Pehrs</surname> <given-names>C.</given-names></name> <name><surname>Skouras</surname> <given-names>S.</given-names></name> <name><surname>Koelsch</surname> <given-names>S.</given-names></name></person-group> (<year>2017</year>). <article-title>Effects of sad and happy music on mind-wandering and the default mode network</article-title>. <source>Sci. Rep</source>. <volume>7</volume>:<fpage>14396</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-017-14849-0</pub-id><pub-id pub-id-type="pmid">29089542</pub-id></citation></ref>
<ref id="B65">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ter Bogt</surname> <given-names>T. F.</given-names></name> <name><surname>Vieno</surname> <given-names>A.</given-names></name> <name><surname>Doornwaard</surname> <given-names>S. M.</given-names></name> <name><surname>Pastore</surname> <given-names>M.</given-names></name> <name><surname>Van den Eijnden</surname> <given-names>R. J.</given-names></name></person-group> (<year>2017</year>). <article-title>&#x0201C;You&#x00027;re not alone&#x0201D;: music as a source of consolation among adolescents and young adults</article-title>. <source>Psychol. Music</source> <volume>45</volume>, <fpage>155</fpage>&#x02013;<lpage>171</lpage>. <pub-id pub-id-type="doi">10.1177/0305735616650029</pub-id></citation>
</ref>
<ref id="B66">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Terpou</surname> <given-names>B. A.</given-names></name> <name><surname>Shaw</surname> <given-names>S. B.</given-names></name> <name><surname>Th&#x000E9;berge</surname> <given-names>J.</given-names></name> <name><surname>F&#x000E9;rat</surname> <given-names>V.</given-names></name> <name><surname>Michel</surname> <given-names>C. M.</given-names></name> <name><surname>McKinnon</surname> <given-names>M. C.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Spectral decomposition of EEG microstates in post-traumatic stress disorder</article-title>. <source>NeuroImage: Clin</source>. <volume>35</volume>:<fpage>103135</fpage>. <pub-id pub-id-type="doi">10.1016/j.nicl.2022.103135</pub-id><pub-id pub-id-type="pmid">36002969</pub-id></citation></ref>
<ref id="B67">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Trost</surname> <given-names>W.</given-names></name> <name><surname>Ethofer</surname> <given-names>T.</given-names></name> <name><surname>Zentner</surname> <given-names>M.</given-names></name> <name><surname>Vuilleumier</surname> <given-names>P.</given-names></name></person-group> (<year>2012</year>). <article-title>Mapping aesthetic musical emotions in the brain</article-title>. <source>Cerebral Cortex</source> <volume>22</volume>, <fpage>2769</fpage>&#x02013;<lpage>2783</lpage>. <pub-id pub-id-type="doi">10.1093/cercor/bhr353</pub-id><pub-id pub-id-type="pmid">22178712</pub-id></citation></ref>
<ref id="B68">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Van den Tol</surname> <given-names>A. J.</given-names></name> <name><surname>Edwards</surname> <given-names>J.</given-names></name></person-group> (<year>2013</year>). <article-title>Exploring a rationale for choosing to listen to sad music when feeling sad</article-title>. <source>Psychol. Music</source> <volume>41</volume>, <fpage>440</fpage>&#x02013;<lpage>465</lpage>. <pub-id pub-id-type="doi">10.1177/0305735611430433</pub-id></citation>
</ref>
<ref id="B69">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Van den Tol</surname> <given-names>A. J.</given-names></name> <name><surname>Edwards</surname> <given-names>J.</given-names></name> <name><surname>Heflick</surname> <given-names>N. A.</given-names></name></person-group> (<year>2016</year>). <article-title>Sad music as a means for acceptance-based coping</article-title>. <source>Musicae Scient</source>. <volume>20</volume>, <fpage>68</fpage>&#x02013;<lpage>83</lpage>. <pub-id pub-id-type="doi">10.1177/1029864915627844</pub-id></citation>
</ref>
<ref id="B70">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Verrusio</surname> <given-names>W.</given-names></name> <name><surname>Ettorre</surname> <given-names>E.</given-names></name> <name><surname>Vicenzini</surname> <given-names>E.</given-names></name> <name><surname>Vanacore</surname> <given-names>N.</given-names></name> <name><surname>Cacciafesta</surname> <given-names>M.</given-names></name> <name><surname>Mecarelli</surname> <given-names>O.</given-names></name></person-group> (<year>2015</year>). <article-title>The Mozart effect: a quantitative EEG study</article-title>. <source>Conscious. Cogn</source>. <volume>35</volume>, <fpage>150</fpage>&#x02013;<lpage>155</lpage>. <pub-id pub-id-type="doi">10.1016/j.concog.2015.05.005</pub-id><pub-id pub-id-type="pmid">26036835</pub-id></citation></ref>
<ref id="B71">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>von Wegner</surname> <given-names>F.</given-names></name> <name><surname>Bauer</surname> <given-names>S.</given-names></name> <name><surname>Rosenow</surname> <given-names>F.</given-names></name> <name><surname>Triesch</surname> <given-names>J.</given-names></name> <name><surname>Laufs</surname> <given-names>H.</given-names></name></person-group> (<year>2021</year>). <article-title>EEG microstate periodicity explained by rotating phase patterns of resting-state alpha oscillations</article-title>. <source>Neuroimage</source> <volume>224</volume>:<fpage>117372</fpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2020.117372</pub-id><pub-id pub-id-type="pmid">32979526</pub-id></citation></ref>
<ref id="B72">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>L.</given-names></name> <name><surname>Ding</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>W.</given-names></name> <name><surname>Yang</surname> <given-names>S.</given-names></name></person-group> (<year>2021</year>). <article-title>Differences in EEG microstate induced by gaming: a comparison between the gaming disorder individual, recreational game users and healthy controls</article-title>. <source>IEEE Access</source> <volume>9</volume>, <fpage>32549</fpage>&#x02013;<lpage>32558</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2021.3060112</pub-id></citation>
</ref>
<ref id="B73">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Whittle</surname> <given-names>S.</given-names></name> <name><surname>Y&#x000FC;cel</surname> <given-names>M.</given-names></name> <name><surname>Yap</surname> <given-names>M. B.</given-names></name> <name><surname>Allen</surname> <given-names>N. B.</given-names></name></person-group> (<year>2011</year>). <article-title>Sex differences in the neural correlates of emotion: evidence from neuroimaging</article-title>. <source>Biol. Psychol</source>. <volume>87</volume>, <fpage>319</fpage>&#x02013;<lpage>333</lpage>. <pub-id pub-id-type="doi">10.1016/j.biopsycho.2011.05.003</pub-id><pub-id pub-id-type="pmid">21600956</pub-id></citation></ref>
<ref id="B74">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wilkins</surname> <given-names>R. W.</given-names></name> <name><surname>Hodges</surname> <given-names>D. A.</given-names></name> <name><surname>Laurienti</surname> <given-names>P. J.</given-names></name> <name><surname>Steen</surname> <given-names>M.</given-names></name> <name><surname>Burdette</surname> <given-names>J. H.</given-names></name></person-group> (<year>2014</year>). <article-title>Network science and the effects of music preference on functional brain connectivity: from Beethoven to Eminem</article-title>. <source>Sci. Rep</source>. <volume>4</volume>, <fpage>1</fpage>&#x02013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1038/srep06130</pub-id><pub-id pub-id-type="pmid">25167363</pub-id></citation></ref>
<ref id="B75">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wilson</surname> <given-names>T. L.</given-names></name> <name><surname>Brown</surname> <given-names>T. L.</given-names></name></person-group> (<year>1997</year>). <article-title>Reexamination of the effect of Mozart&#x00027;s music on spatial-task performance</article-title>. <source>J. Psychol</source>. <volume>131</volume>, <fpage>365</fpage>&#x02013;<lpage>370</lpage>. <pub-id pub-id-type="doi">10.1080/00223989709603522</pub-id></citation>
</ref>
<ref id="B76">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yeshurun</surname> <given-names>Y.</given-names></name> <name><surname>Nguyen</surname> <given-names>M.</given-names></name> <name><surname>Hasson</surname> <given-names>U.</given-names></name></person-group> (<year>2021</year>). <article-title>The default mode network: where the idiosyncratic self meets the shared social world</article-title>. <source>Nat. Rev. Neurosci</source>. <volume>22</volume>, <fpage>181</fpage>&#x02013;<lpage>192</lpage>. <pub-id pub-id-type="doi">10.1038/s41583-020-00420-w</pub-id><pub-id pub-id-type="pmid">33483717</pub-id></citation></ref>
<ref id="B77">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zanesco</surname> <given-names>A. P.</given-names></name> <name><surname>Skwara</surname> <given-names>A. C.</given-names></name> <name><surname>King</surname> <given-names>B. G.</given-names></name> <name><surname>Powers</surname> <given-names>C.</given-names></name> <name><surname>Wineberg</surname> <given-names>K.</given-names></name> <name><surname>Saron</surname> <given-names>C. D.</given-names></name></person-group> (<year>2021</year>). <article-title>Meditation training modulates brain electric microstates and felt states of awareness</article-title>. <source>Hum. Brain Mapp</source>. <volume>42</volume>, <fpage>3228</fpage>&#x02013;<lpage>3252</lpage>. <pub-id pub-id-type="doi">10.1002/hbm.25430</pub-id><pub-id pub-id-type="pmid">33783922</pub-id></citation></ref>
<ref id="B78">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>K.</given-names></name> <name><surname>Shi</surname> <given-names>W.</given-names></name> <name><surname>Wang</surname> <given-names>C.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>Z.</given-names></name> <name><surname>Liu</surname> <given-names>T.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Reliability of EEG microstate analysis at different electrode densities during propofol-induced transitions of brain states</article-title>. <source>Neuroimage</source> <volume>231</volume>:<fpage>117861</fpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2021.117861</pub-id><pub-id pub-id-type="pmid">33592245</pub-id></citation></ref>
<ref id="B79">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zulliger</surname> <given-names>J.</given-names></name> <name><surname>Diaz Hernandez</surname> <given-names>L.</given-names></name> <name><surname>Koenig</surname> <given-names>T.</given-names></name></person-group> (<year>2022</year>). <article-title>Within and between subject spectral fingerprints of EEG-microstate parameters</article-title>. <source>Brain Topogr</source>. <volume>35</volume>, <fpage>277</fpage>&#x02013;<lpage>281</lpage>. <pub-id pub-id-type="doi">10.1007/s10548-022-00896-y</pub-id><pub-id pub-id-type="pmid">35414139</pub-id></citation></ref>
</ref-list>
</back>
</article> 