<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Psychiatry</journal-id>
<journal-title-group>
<journal-title>Frontiers in Psychiatry</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Psychiatry</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1664-0640</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpsyt.2026.1764932</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>A novel deep learning model for objective quantification of generalized anxiety disorder severity using EEG functional connectivity</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" equal-contrib="yes">
<name><surname>Luo</surname><given-names>Xiaodong</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn003"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3299578/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name><surname>Cui</surname><given-names>Yuhuan</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn003"><sup>&#x2020;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name><surname>Yan</surname><given-names>Zihao</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn003"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3321690/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Liu</surname><given-names>Wei</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2540373/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Zhou</surname><given-names>Bin</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2770279/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Li</surname><given-names>Gang</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1002213/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Liu</surname><given-names>Shouqing</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>The Second Hospital of Jinhua</institution>, <city>Jinhua</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff2"><label>2</label><institution>College of Mathematical Medicine, Zhejiang Normal University</institution>, <city>Jinhua</city>,&#xa0;<country country="CN">China</country></aff>
<author-notes>
<corresp id="c001"><label>*</label>Correspondence: Gang Li, <email xlink:href="mailto:ligang@zjnu.cn">ligang@zjnu.cn</email>; Shouqing Liu, <email xlink:href="mailto:shouqing.liu@outlook.com">shouqing.liu@outlook.com</email></corresp>
<fn fn-type="equal" id="fn003">
<p>&#x2020;These authors have contributed equally to this work and share first authorship</p></fn>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-18">
<day>18</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>17</volume>
<elocation-id>1764932</elocation-id>
<history>
<date date-type="received">
<day>18</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>26</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="rev-recd">
<day>24</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Luo, Cui, Yan, Liu, Zhou, Li and Liu.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Luo, Cui, Yan, Liu, Zhou, Li and Liu</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-18">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Generalized anxiety disorder (GAD) is a prevalent and disabling psychiatric condition, yet its severity is still assessed mainly through clinical interviews and self-report scales, which lack objective neurobiological markers. This study aimed to develop an electroencephalography (EEG)-based deep learning (DL) model for objective quantification of GAD severity based on functional connectivity (FC) features. Resting-state EEG was recorded for 10 min from 80 patients with GAD and 39 healthy controls (HC). EEG segments with window lengths between 2 and 10 s were used to compute band-limited FC features, which were then used as input to a convolutional gated multilayer perceptron (Conv_gMLP) network for continuous prediction of the Hamilton Anxiety Rating Scale (HAM-A) total scores. The Conv_gMLP model achieved a mean absolute error (MAE) of 0.32 &#xb1; 0.07 in predicting the HAM-A total score (range: 0&#x2013;56), outperforming conventional machine learning (ML) models and other DL architectures. Feature attribution analyses indicated that connectivity between frontal and temporal regions, particularly in the beta frequency range, contributed most strongly to the prediction of GAD severity. These findings suggest that EEG FC and beta rhythms encode clinically meaningful information about GAD severity, and that Conv_gMLP-based models may provide a promising tool for objective, time-efficient assessment to support individualized treatment planning.</p>
</abstract>
<kwd-group>
<kwd>beta rhythm</kwd>
<kwd>deep learning</kwd>
<kwd>electroencephalography</kwd>
<kwd>functional connectivity</kwd>
<kwd>gated multilayer perceptron</kwd>
<kwd>generalized anxiety disorder</kwd>
<kwd>severity assessment</kwd>
</kwd-group>
<funding-group>
<award-group id="gs1">
<funding-source id="sp1">
<institution-wrap>
<institution>Jinhua Science and Technology Bureau</institution>
<institution-id institution-id-type="doi" vocab="open-funder-registry" vocab-identifier="10.13039/open_funder_registry">10.13039/501100008092</institution-id>
</institution-wrap>
</funding-source>
<award-id rid="sp1">2023-3-154</award-id>
<award-id rid="sp1">2023-3-157</award-id>
</award-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This research was funded in part by the Jinhua Science and Technology Bureau, grant numbers 2023-3-154 and 2023-3-157; and in part by the National Undergraduate Training Program on Innovation and Entrepreneurship, grant number 202410345049.</funding-statement>
</funding-group>
<counts>
<fig-count count="5"/>
<table-count count="9"/>
<equation-count count="16"/>
<ref-count count="58"/>
<page-count count="13"/>
<word-count count="7958"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Computational Psychiatry</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>GAD is a chronic and recurrent mental illness characterized by excessive and uncontrollable worry across multiple aspects of life (<xref ref-type="bibr" rid="B1">1</xref>). It is frequently accompanied by symptoms such as irritability, difficulty concentrating, muscle tension, headaches, restlessness, and sleep disturbances, which significantly impair daily functioning and quality of life (<xref ref-type="bibr" rid="B2">2</xref>). In today&#x2019;s fast-paced society, GAD has become one of the most prevalent psychiatric disorders (<xref ref-type="bibr" rid="B3">3</xref>), with recent studies reporting a prevalence rate as high as 6.0% (<xref ref-type="bibr" rid="B4">4</xref>). GAD not only disrupts social functioning and reduces overall well-being but also imposes a substantial economic burden on healthcare systems (<xref ref-type="bibr" rid="B5">5</xref>, <xref ref-type="bibr" rid="B6">6</xref>). Notably, the severity of GAD is closely associated with the extent of social dysfunction (<xref ref-type="bibr" rid="B7">7</xref>), and patients with varying severity levels require tailored treatment strategies. Personalized treatment plans based on GAD severity can facilitate precision medicine, optimizing therapeutic outcomes (<xref ref-type="bibr" rid="B8">8</xref>). Therefore, accurately identifying and assessing the severity of GAD in a timely manner is crucial for effective patient management.</p>
<p>Although extensive research on GAD has been conducted over the years, an objective and quantitative assessment method for effectively evaluating the severity of GAD remains lacking in clinical practice. Currently, the evaluation of GAD severity primarily relies on clinical assessments and patient self-reports, both of which are inherently subjective (<xref ref-type="bibr" rid="B9">9</xref>). These evaluations are susceptible to individual differences and sociocultural influences, potentially compromising their reliability and consistency (<xref ref-type="bibr" rid="B10">10</xref>). The Hamilton Anxiety Rating Scale (HAM-A) is a widely used clinical instrument for assessing anxiety severity (<xref ref-type="bibr" rid="B11">11</xref>), and its total score served as the reference standard for severity labeling in this study (<xref ref-type="bibr" rid="B12">12</xref>). While the HAM-A is widely used in clinical settings to categorize anxiety severity into mild, moderate, or severe (<xref ref-type="bibr" rid="B13">13</xref>), it also presents several limitations. Its symptom descriptions often lack specificity, and the scoring process may be imprecise, further affecting the objectivity of severity assessment. Given these shortcomings, there is an urgent need to explore novel auxiliary diagnostic methods to enhance the accuracy and standardization of GAD severity assessment.</p>
<p>Although a variety of techniques have been adopted in current clinical research to assist in the auxiliary diagnosis and assessment of GAD&#x2014;including EEG (<xref ref-type="bibr" rid="B14">14</xref>), functional magnetic resonance imaging (<xref ref-type="bibr" rid="B15">15</xref>), Facial Action Coding System (<xref ref-type="bibr" rid="B16">16</xref>), heart rate variability (<xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B18">18</xref>)&#x2014;studies have suggested that EEG may offer distinct advantages in evaluating the severity of the disorder (<xref ref-type="bibr" rid="B19">19</xref>). Owing to its non-invasive nature, low cost, and high temporal resolution, EEG remains a preferred modality for investigating human brain electrophysiology and cognitive function (<xref ref-type="bibr" rid="B20">20</xref>). EEG captures functional differences between brain regions by recording the dynamic electrical activity on the cortical surface or scalp. Neural activity can be reflected across multiple functional regions (<xref ref-type="bibr" rid="B21">21</xref>), and synchronous activity between different brain regions is referred to as functional connectivity (FC). Quantifying FC requires the application of specific computational estimators, and a variety of EEG-based FC estimators have been developed and widely utilized in the field of neuropsychiatry. For instance, Ayd&#x131;n et&#xa0;al. (<xref ref-type="bibr" rid="B22">22</xref>) demonstrated reduced segregation in resting-state EEG functional networks in patients with Alzheimer&#x2019;s disease through EEG-based network analysis. Another study employed spectral coherence combined with ML to classify emotion regulation strategies (<xref ref-type="bibr" rid="B23">23</xref>). 
Among various estimators, Phase Lag Index (PLI) quantifies phase synchronization by assessing the asymmetry in the distribution of non-zero phase lags between signals, offering distinct advantages in estimating genuine brain connectivity. By focusing specifically on phase relationships between signals, PLI demonstrates enhanced stability and reliability in quantifying synchrony across different brain regions. Therefore, this work employs resting-state FC networks constructed using PLI, with the aim of objectively and continuously quantifying symptom severity in patients with GAD.</p>
<p>Currently, EEG-based research on GAD primarily focuses on classification tasks, typically involving binary classification (distinguishing healthy individuals from GAD patients) or multiclass classification (identifying mild, moderate, and severe GAD) (<xref ref-type="bibr" rid="B24">24</xref>, <xref ref-type="bibr" rid="B25">25</xref>). However, such approaches largely remain at the level of categorical label prediction, falling short of meeting the clinical need for continuous, refined, and individualized assessment of GAD severity. Existing studies have shown that EEG combined with ML technology has great potential in the objective assessment of anxiety disorders (<xref ref-type="bibr" rid="B26">26</xref>&#x2013;<xref ref-type="bibr" rid="B31">31</xref>), especially demonstrating excellent performance in the extraction and interpretation of high-dimensional EEG features (<xref ref-type="bibr" rid="B32">32</xref>). In comparison, DL algorithms, with their multi-layered neural network architectures, are capable of automatically extracting complex and abstract features from raw EEG signals, demonstrating superior performance in complex pattern recognition tasks (<xref ref-type="bibr" rid="B33">33</xref>). DL&#x2019;s end-to-end learning capability, which integrates feature extraction and classification, has yielded remarkable results in handling high-dimensional, complex physiological data (<xref ref-type="bibr" rid="B34">34</xref>). In the broader field of EEG analysis, spatio-temporal representation learning has shown significant promise. For instance, in EEG-based emotion recognition, the STRFLNet model effectively captures both spatial and temporal features through representation fusion learning, demonstrating the importance of integrating multi-dimensional information for accurate brain state assessment (<xref ref-type="bibr" rid="B35">35</xref>). 
Meanwhile, some researchers have applied DL to EEG connectivity to assess the severity of social anxiety disorder, achieving objective prediction of anxiety severity (<xref ref-type="bibr" rid="B33">33</xref>). Although previous studies have indicated that DL models can assist in the diagnosis of GAD (<xref ref-type="bibr" rid="B36">36</xref>), reports on their use in precise and continuous severity assessment remain limited. Given this background, the integration of EEG techniques with advanced DL architectures holds promise for building high-accuracy predictive frameworks, enabling more efficient evaluation of GAD severity and further advancing its clinical application in precision diagnostics.</p>
<p>Building upon these considerations, a critical gap persists in the EEG-based deep-learning literature on GAD: the scarcity of frameworks that quantify symptom severity on a continuous, individual-level scale. To address this, the present study develops an EEG-based regression framework. This framework utilizes PLI-derived FC features and a novel Convolutional Gated Multilayer Perceptron (Conv_gMLP) architecture to predict the HAM-A total score. The central hypothesis is that resting-state FC patterns encode signatures relevant to clinical severity. This hypothesis is subsequently tested through systematic ablation studies of the Conv_gMLP architecture and an analysis of the impact of temporal resolution (time-window length) on predictive performance. Collectively, this work aims to bridge conventional symptom-based ratings with objective, neurophysiology-derived representations of GAD severity.</p>
</sec>
<sec id="s2" sec-type="materials|methods">
<label>2</label>
<title>Materials and methods</title>
<sec id="s2_1">
<label>2.1</label>
<title>Participants</title>
<p>The EEG data for this study were obtained from a local hospital. The GAD group consisted of 80 patients diagnosed with GAD, and the HC group included 39 healthy controls. Patients with GAD were diagnosed by psychiatric experts using structured clinical interviews according to the Diagnostic and Statistical Manual of Mental Disorders, Fifth Edition (DSM-5) criteria, and anxiety severity was assessed using the HAM-A. The HAM-A consists of 14 items, each scored on a 0&#x2013;4 ordinal scale, yielding a total score ranging from 0 to 56; higher scores indicate greater anxiety severity. In this study, the HAM-A total score was used as the reference standard for model training and evaluation. HC scores were retained as observed labels and included to represent the lower end of the continuous severity spectrum, without additional near-zero remapping. Detailed demographic information is presented in <xref ref-type="table" rid="T1"><bold>Table&#xa0;1</bold></xref>. As shown, there were no significant differences in age or sex between the two groups (age: <italic>p</italic> = 0.06; sex: <italic>p</italic> = 0.87), whereas the HAM-A scores showed a significant difference between the two groups (<italic>p</italic> = 1.14 &#xd7; 10<sup>&#x2212;24</sup>). To ensure data accuracy and reliability, all participants were required to meet specific inclusion criteria. First, all participants with GAD were first-time clinic visitors and had not taken any medications at the time of EEG acquisition. Furthermore, any participant with current use of psychoactive medications (including but not limited to benzodiazepines, antidepressants, antipsychotics, or stimulants) was excluded. Second, all participants were right-handed, and individuals with epilepsy, neurodegenerative diseases, stroke, schizophrenia, or other psychiatric disorders were excluded. 
Additionally, participants with severe cardiopulmonary dysfunction, significant hepatic or renal impairment, malignant tumors, or autoimmune diseases were not eligible. These conditions and their treatments may influence cerebral oxygenation, metabolic status, inflammatory status, and neurophysiological activity, thereby potentially altering EEG rhythms and FC. To minimize potential confounding factors, participants were instructed to maintain adequate sleep the night before data collection and to refrain from smoking, as well as from consuming coffee or strong tea, within 8 hours prior to testing. Furthermore, individuals with a history of substance or alcohol abuse were excluded from the study. Lastly, participants were required to have no prior history of brain injury. This study was approved by the Ethics Committee of Zhejiang Normal University. Written informed consent was obtained from all participants prior to their involvement in the study.</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Demographic and clinical characteristics of participants.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Characteristic</th>
<th valign="middle" align="center">HC (n=39)</th>
<th valign="middle" align="center">GAD (n=80)</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">Age (years)</td>
<td valign="middle" align="center">21-63<break/>(37.3 &#xb1; 12.62)</td>
<td valign="middle" align="center">22-74<break/>(48.8 &#xb1; 11.2)</td>
</tr>
<tr>
<td valign="middle" align="center">Sex (Male/Female)</td>
<td valign="middle" align="center">12/27</td>
<td valign="middle" align="center">21/59</td>
</tr>
<tr>
<td valign="middle" align="center">HAM-A Score</td>
<td valign="middle" align="center">2.3 &#xb1; 0.9</td>
<td valign="middle" align="center">24.6 &#xb1; 8.1</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s2_2">
<label>2.2</label>
<title>EEG data acquisition and preprocessing</title>
<p>All EEG recordings were conducted in a specialized EEG acquisition room within the hospital to ensure a high-quality and standardized data collection process. Participants were instructed to stay awake, keep their eyes closed, and remain relaxed during each session, which lasted 10 min to record resting-state EEG signals. EEG data were acquired using a Nicolet EEG TS215605 system, with electrodes placed according to the international 10&#x2013;20 system. A total of 16 electrodes were selected, specifically FP1, FP2, F3, F4, C3, C4, P3, P4, O1, O2, F7, F8, T3, T4, T5, and T6, with bilateral mastoids serving as reference electrodes. To ensure signal precision, the sampling rate was set to 250 Hz, allowing for the capture of subtle variations in EEG signals. Furthermore, electrode impedance was maintained below 5 k&#x2126; to minimize signal interference and enhance data reliability.</p>
<p>The acquired EEG data underwent a series of preprocessing steps to ensure data quality and reliability: (1) Downsampling and Filtering: The raw EEG data were downsampled to 125 Hz and processed using a fourth-order Butterworth bandpass filter, restricting the frequency range to 4&#x2013;30 Hz to remove unwanted low- and high-frequency noise. (2) Fast independent component analysis was employed to effectively remove artifacts, including eye blinks, electrocardiographic interference, and electromyographic noise, ensuring the extraction of clean EEG signals reflective of genuine neural activity. (3) Segmentation: The continuous EEG recordings were segmented into shorter epochs to capture distinct neural activity patterns and facilitate subsequent feature extraction. (4) Frequency Band Extraction: EEG signals were decomposed into distinct frequency bands to analyze different neural oscillations, including theta (4&#x2013;8 Hz), alpha1 (8&#x2013;10 Hz), alpha2 (10&#x2013;13 Hz), and beta (13&#x2013;30 Hz) rhythms. These preprocessing steps ensured that the EEG data were clean, reliable, and suitable for further analysis and research.</p>
</sec>
<sec id="s2_3">
<label>2.3</label>
<title>Feature extraction</title>
<p>In this study, PLI was extracted as a key feature for the quantitative assessment of anxiety severity in GAD patients. Compared to other time-domain and nonlinear features, PLI was considered more reliable in terms of stability. Time-domain features may exhibit fluctuations across different time points, leading to inconsistencies. Nonlinear features are highly susceptible to signal noise, affecting their robustness. PLI has been widely recognized for its advantages in assessing FC in brain networks (<xref ref-type="bibr" rid="B37">37</xref>, <xref ref-type="bibr" rid="B38">38</xref>). Unlike time-domain features that can fluctuate with time and nonlinear features that are sensitive to noise, PLI focuses on the phase relationship between signals, which is more stable and reliable for quantifying synchronization between different brain regions (<xref ref-type="bibr" rid="B39">39</xref>). The utilization of PLI features helps minimize instability-related noise, thus enabling a more accurate representation of EEG activity.</p>
<p>Given two preprocessed EEG signals, <inline-formula>
<mml:math display="inline" id="im1"><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> and <inline-formula>
<mml:math display="inline" id="im2"><mml:mrow><mml:msub><mml:mi>x</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula>, the instantaneous phase information is first obtained using the Hilbert Transform, as shown in <xref ref-type="disp-formula" rid="eq1">Equation 1</xref>:</p>
<disp-formula id="eq1"><label>(1)</label>
<mml:math display="block" id="M1"><mml:mrow><mml:mtext>&#x3a6;</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mtext>x</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mtext>t</mml:mtext><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mtext>j</mml:mtext><mml:mfrac><mml:mn>1</mml:mn><mml:mi>&#x3c0;</mml:mi></mml:mfrac><mml:mi>P</mml:mi><mml:mo>.</mml:mo><mml:mi>V</mml:mi><mml:mo>.</mml:mo><mml:mstyle displaystyle="true"><mml:mrow><mml:msubsup><mml:mo>&#x222b;</mml:mo><mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mi>&#x221e;</mml:mi></mml:mrow><mml:mi>&#x221e;</mml:mi></mml:msubsup><mml:mrow><mml:mfrac><mml:mrow><mml:mtext>x</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>&#x3c4;</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>&#x3c4;</mml:mi></mml:mrow></mml:mfrac><mml:mi>d</mml:mi><mml:mi>&#x3c4;</mml:mi></mml:mrow></mml:mrow></mml:mstyle></mml:mrow></mml:math>
</disp-formula>
<p>Here, <inline-formula>
<mml:math display="inline" id="im3"><mml:mrow><mml:mi>j</mml:mi><mml:mtext>&#xa0;</mml:mtext></mml:mrow></mml:math></inline-formula> represents the imaginary unit, and <inline-formula>
<mml:math display="inline" id="im4"><mml:mrow><mml:mi>P</mml:mi><mml:mo>.</mml:mo><mml:mi>V</mml:mi><mml:mo>.</mml:mo><mml:mtext>&#xa0;</mml:mtext></mml:mrow></mml:math></inline-formula> denotes the Cauchy principal value. At each time point <inline-formula>
<mml:math display="inline" id="im5"><mml:mi>t</mml:mi></mml:math></inline-formula>, PLI can be used to assess the phase synchronization between <inline-formula>
<mml:math display="inline" id="im6"><mml:mrow><mml:msub><mml:mrow><mml:mtext>&#xa0;&#x3a6;</mml:mtext></mml:mrow><mml:mn>1</mml:mn></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mtext>&#xa0;</mml:mtext></mml:mrow></mml:math></inline-formula> and <inline-formula>
<mml:math display="inline" id="im7"><mml:mrow><mml:msub><mml:mrow><mml:mtext>&#xa0;&#x3a6;</mml:mtext></mml:mrow><mml:mn>2</mml:mn></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mtext>&#xa0;</mml:mtext></mml:mrow></mml:math></inline-formula> by computing the phase difference. The phase difference is calculated as shown in <xref ref-type="disp-formula" rid="eq2">Equation 2</xref>:</p>
<disp-formula id="eq2"><label>(2)</label>
<mml:math display="block" id="M2"><mml:mrow><mml:mtext>&#x394;&#x3a6;</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mtext>t</mml:mtext><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mtext>arg</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mtext>&#x3a6;</mml:mtext><mml:mn>1</mml:mn></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mi>arg</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mi>&#x3a6;</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<p>Here, <inline-formula>
<mml:math display="inline" id="im8"><mml:mrow><mml:mtext>arg&#xa0;</mml:mtext></mml:mrow></mml:math></inline-formula> denotes the phase angle of a complex number. Subsequently, the signum function of the phase difference is defined as shown in <xref ref-type="disp-formula" rid="eq3">Equation 3</xref>:</p>
<disp-formula id="eq3"><label>(3)</label>
<mml:math display="block" id="M3"><mml:mrow><mml:mi>f</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mtable><mml:mtr><mml:mtd><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mi>i</mml:mi><mml:mi>f</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mtext>&#x394;&#x3a6;</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mtext>t</mml:mtext><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x2208;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mtext>&#x3c0;</mml:mtext><mml:mo>,</mml:mo><mml:mtext>&#x3c0;</mml:mtext></mml:mrow><mml:mo stretchy="false">]</mml:mo></mml:mrow></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mi>o</mml:mi><mml:mi>t</mml:mi><mml:mi>h</mml:mi><mml:mi>e</mml:mi><mml:mi>r</mml:mi><mml:mi>w</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>e</mml:mi><mml:mo>&#xa0;</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mo>&#xa0;</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<p>Finally, the PLI is defined as shown in <xref ref-type="disp-formula" rid="eq4">Equation 4</xref>:</p>
<disp-formula id="eq4"><label>(4)</label>
<mml:math display="block" id="M4"><mml:mrow><mml:mi>P</mml:mi><mml:mi>L</mml:mi><mml:mi>I</mml:mi><mml:mo>=</mml:mo><mml:mrow><mml:mo>|</mml:mo><mml:mrow><mml:mrow><mml:mo>&#x2329;</mml:mo><mml:mrow><mml:mi>f</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>&#x232a;</mml:mo></mml:mrow></mml:mrow><mml:mo>|</mml:mo></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<p>Here, <inline-formula>
<mml:math display="inline" id="im9"><mml:mrow><mml:mrow><mml:mo>|</mml:mo><mml:mo>&#xb7;</mml:mo><mml:mo>|</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> denotes the absolute value, while <inline-formula>
<mml:math display="inline" id="im10"><mml:mrow><mml:mo>&#xa0;</mml:mo><mml:mrow><mml:mo>&#x2329;</mml:mo><mml:mo>&#xb7;</mml:mo><mml:mo>&#x232a;</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> represents the mean operation. The PLI value ranges from 0 to 1, indicating the degree of phase synchronization between brain regions. A PLI value close to 0 suggests an absence of phase synchronization, whereas a value approaching 1 indicates strong phase synchronization.</p>
<p>In this study, four frequency bands were analyzed: theta (4&#x2013;8 Hz), alpha1 (8&#x2013;10 Hz), alpha2 (10&#x2013;13 Hz), and beta (13&#x2013;30 Hz). For each frequency band, PLI was calculated for all possible electrode pair combinations, yielding N &#xd7; (N - 1)/2 features, where N = 16 in this study. Consequently, a total of <inline-formula>
<mml:math display="inline" id="im11"><mml:mrow><mml:mo>&#xa0;</mml:mo><mml:mn>4</mml:mn><mml:mo>&#xd7;</mml:mo><mml:mn>120</mml:mn><mml:mo>=</mml:mo><mml:mn>480</mml:mn></mml:mrow></mml:math></inline-formula> features were extracted across the four frequency bands.</p>
</sec>
<sec id="s2_4">
<label>2.4</label>
<title>Conv_gMLP</title>
<p>This section introduces the Conv_gMLP model, an innovative approach proposed for predicting the severity of GAD. The inspiration for Conv_gMLP originates from the gMLP module, which has demonstrated strong performance in image and natural language processing tasks (<xref ref-type="bibr" rid="B40">40</xref>). Given its frequent application in tabular data tasks, it is commonly referred to as tab_gMLP. In this study, the gMLP architecture is refined and extended to create the Conv_gMLP model. This novel gMLP model uses PLI features derived from quantitative EEG analysis as input. The Conv_gMLP model consists of three key components: channel expansion convolution module, the gMLP module, and feature aggregation. As illustrated in <xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1</bold></xref>, the Conv_gMLP Model architecture is constructed by stacking N identical gMLP blocks of the same structure and size. All projection operations within the model are linear, where &#x2297; represents element-wise multiplication (linear gating) and &#x2295; denotes element-wise addition (residual connection). Additionally, ablation experiments were conducted on different gMLP model architectures to evaluate the effectiveness of Conv_gMLP in predicting GAD severity.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>Schematic diagram of the Conv_gMLP model architecture with spatial gating units.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyt-17-1764932-g001.tif">
<alt-text content-type="machine-generated">Diagram illustrating a neural network architecture. It begins with “Inputs” leading to “Conv1d”, followed by “Norm”, “Channel Proj”, “Activation”, and a “Spatial Gating Unit”. The process iterates \(N\) times and continues through “AvePool” and “Linear”. The “Spatial Gating Unit” includes a dashed box with “Split”, “Norm”, and “Spatial Proj” components. Arrows indicate data flow direction.</alt-text>
</graphic></fig>
<sec id="s2_4_1">
<label>2.4.1</label>
<title>Channel expansion convolutional model</title>
<p>The traditional gMLP model is not suitable for processing inputs in the form of one-dimensional feature vectors. To address this, a 1D convolution (Conv1D) with a kernel size of 1 and a stride of 1 is applied to the 1&#xd7;480 PLI feature input. This operation expands the number of channels, transforming and enhancing the features to enhance their representational capacity and diversity. This process is referred to as the channel expansion convolution module, with the specific formula shown in <xref ref-type="disp-formula" rid="eq5">Equation 5</xref>:</p>
<disp-formula id="eq5"><label>(5)</label>
<mml:math display="block" id="M5"><mml:mrow><mml:mover accent="true"><mml:mi>x</mml:mi><mml:mo>&#x2dc;</mml:mo></mml:mover><mml:mo>=</mml:mo><mml:mi>C</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mi>v</mml:mi><mml:mn>1</mml:mn><mml:mi>d</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>c</mml:mi><mml:mi>h</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mi>k</mml:mi><mml:mi>e</mml:mi><mml:mi>r</mml:mi><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>l</mml:mi><mml:mo>_</mml:mo><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>z</mml:mi><mml:mi>e</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>,</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mi>s</mml:mi><mml:mi>t</mml:mi><mml:mi>r</mml:mi><mml:mi>i</mml:mi><mml:mi>d</mml:mi><mml:mi>e</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<p>Here, <inline-formula>
<mml:math display="inline" id="im12"><mml:mrow><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>c</mml:mi><mml:mi>h</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math></inline-formula> represents the number of kernels, and <inline-formula>
<mml:math display="inline" id="im13"><mml:mrow><mml:mi>x</mml:mi><mml:mo>&#x2208;</mml:mo><mml:msup><mml:mi>&#x211d;</mml:mi><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#xd7;</mml:mo><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>f</mml:mi><mml:mi>e</mml:mi><mml:mi>a</mml:mi><mml:mi>t</mml:mi><mml:mi>u</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:msup></mml:mrow></mml:math></inline-formula> is the feature vector input to the model, where <inline-formula>
<mml:math display="inline" id="im14"><mml:mrow><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>f</mml:mi><mml:mi>e</mml:mi><mml:mi>a</mml:mi><mml:mi>t</mml:mi><mml:mi>u</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math></inline-formula> denotes the number of features. The output after channel expansion is denoted as <inline-formula>
<mml:math display="inline" id="im15"><mml:mrow><mml:mover accent="true"><mml:mi>x</mml:mi><mml:mo>&#x2dc;</mml:mo></mml:mover><mml:mo>&#x2208;</mml:mo><mml:msup><mml:mi>&#x211d;</mml:mi><mml:mrow><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>c</mml:mi><mml:mi>h</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mo>&#xd7;</mml:mo><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>f</mml:mi><mml:mi>e</mml:mi><mml:mi>a</mml:mi><mml:mi>t</mml:mi><mml:mi>u</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:msup></mml:mrow></mml:math></inline-formula>.</p>
<p>The introduction of Conv1D is not merely a simple channel expansion operation; rather, it introduces parameterized spatial transformations in the feature dimension through convolutional operations. This operation can be viewed as a feature extraction process, which extracts more discriminative feature representations from the raw features, thus providing more valuable input for subsequent prediction tasks. With a kernel size and stride of 1, each convolutional kernel slides continuously over the feature dimension, focusing on individual feature points. Through local perception and abstraction, it captures subtle changes and patterns in the feature dimension. Furthermore, the use of multiple convolutional kernels allows each kernel to capture distinct abstract features along the feature dimension. This design enables the model to learn a broader range of feature representations at different levels, enhancing the model&#x2019;s ability to express features in higher-dimensional spaces.</p>
</sec>
<sec id="s2_4_2">
<label>2.4.2</label>
<title>gMLP module</title>
<p>The &#x201c;g&#x201d; in gMLP stands for gating, also known as the gated attention mechanism, which is a simple yet powerful attention mechanism that has been widely applied in neural network models in recent years. This mechanism dynamically adjusts the importance of the input, allowing the model to selectively focus on specific parts of the input, thereby enhancing the model&#x2019;s performance and generalization ability. Liu et&#xa0;al. introduced a model called gMLP (<xref ref-type="bibr" rid="B40">40</xref>), which integrates gating into the Multi-Layer Perceptron (MLP) architecture. This model has demonstrated remarkable performance in key language and vision tasks.</p>
<p>The detailed pseudocode for a standard gMLP block is presented in <xref ref-type="table" rid="T2"><bold>Table&#xa0;2</bold></xref>. Each gMLP block further performs channel projection (CP) and statically parameterized spatial projection on the output data from the previous layer, aiming to enhance the representation and interaction of deep features. As shown in <xref ref-type="disp-formula" rid="eq6">Equations 6</xref>&#x2013;<xref ref-type="disp-formula" rid="eq8">8</xref>, for simplicity, normalization, residual connections, and bias terms are omitted in the equations:</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Pseudo-code for the gMLP block.</p>
</caption>
<table frame="hsides">
<tbody>
<tr>
<td valign="middle" align="left"><bold><italic>def</italic></bold> <italic>gmlp_block(x, d_channel, d_ffn):</italic></td>
</tr>
<tr>
<td valign="middle" align="left"><italic>&#x2003;shortcut = x</italic></td>
</tr>
<tr>
<td valign="middle" align="left"><italic>&#x2003;x = norm(x)</italic></td>
</tr>
<tr>
<td valign="middle" align="left"><italic>&#x2003;x = proj(x, d_ffn&#xd7;2, axis=&#x201c;channel&#x201d;)</italic></td>
</tr>
<tr>
<td valign="middle" align="left"><italic>&#x2003;x = gelu(x)</italic></td>
</tr>
<tr>
<td valign="middle" align="left"><italic>&#x2003;x =</italic> sp<italic>atial_gating_unit(x)</italic></td>
</tr>
<tr>
<td valign="middle" align="left"><italic>&#x2003;x = proj(x, d_channel, axis=&#x201c;channel&#x201d;)</italic></td>
</tr>
<tr>
<td valign="middle" align="left"><bold><italic>return</italic></bold> <italic>x + shortcut</italic></td>
</tr>
<tr>
<td valign="middle" align="left"><bold><italic>def</italic></bold> <italic>spatial_gating_unit(x):</italic></td>
</tr>
<tr>
<td valign="middle" align="left"><italic>&#x2003;u, v =</italic> sp<italic>lit(x, axis=&#x201c;channel&#x201d;)</italic></td>
</tr>
<tr>
<td valign="middle" align="left"><italic>&#x2003;v = norm(v)</italic></td>
</tr>
<tr>
<td valign="middle" align="left"><italic>&#x2003;v = proj(v, d_feature, axis=&#x201c;spatial&#x201d;, init_bias=1)</italic></td>
</tr>
<tr>
<td valign="middle" align="left"><bold><italic>return</italic></bold> <italic>u&#xd7;v</italic></td>
</tr>
</tbody>
</table>
</table-wrap>
<disp-formula id="eq6"><label>(6)</label>
<mml:math display="block" id="M6"><mml:mrow><mml:mi>Z</mml:mi><mml:mo>=</mml:mo><mml:mi>&#x3c3;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>X</mml:mi><mml:mi>U</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq7"><label>(7)</label>
<mml:math display="block" id="M7"><mml:mrow><mml:mover accent="true"><mml:mi>Z</mml:mi><mml:mo>&#x2dc;</mml:mo></mml:mover><mml:mo>=</mml:mo><mml:mi>s</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>Z</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq8"><label>(8)</label>
<mml:math display="block" id="M8"><mml:mrow><mml:mi>Y</mml:mi><mml:mo>=</mml:mo><mml:mover accent="true"><mml:mi>Z</mml:mi><mml:mo>&#x2dc;</mml:mo></mml:mover><mml:mi>V</mml:mi></mml:mrow></mml:math>
</disp-formula>
<p>Here, <inline-formula>
<mml:math display="inline" id="im16"><mml:mi>&#x3c3;</mml:mi></mml:math></inline-formula> represents the activation function, specifically the Gelu activation function used in this study. <inline-formula>
<mml:math display="inline" id="im17"><mml:mrow><mml:mi>X</mml:mi><mml:mo>&#x2208;</mml:mo><mml:msup><mml:mi>&#x211d;</mml:mi><mml:mrow><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>f</mml:mi><mml:mi>e</mml:mi><mml:mi>a</mml:mi><mml:mi>t</mml:mi><mml:mi>u</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub><mml:mo>&#xd7;</mml:mo><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>c</mml:mi><mml:mi>h</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:msup></mml:mrow></mml:math></inline-formula> denotes the output from the previous layer, while U and V define linear projections along the channel direction, with neuron counts of <inline-formula>
<mml:math display="inline" id="im18"><mml:mrow><mml:mn>2</mml:mn><mml:mo>&#xd7;</mml:mo><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>f</mml:mi><mml:mi>f</mml:mi><mml:mi>n</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math></inline-formula> and <inline-formula>
<mml:math display="inline" id="im19"><mml:mrow><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>c</mml:mi><mml:mi>h</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:math></inline-formula>, respectively. <inline-formula>
<mml:math display="inline" id="im20"><mml:mrow><mml:mi>s</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mo>&#xb7;</mml:mo><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math></inline-formula> refers to the Spatial Gating Unit (SGU), which performs spatial projection on high-dimensional features. SGU employs a simple spatial linear mapping and gating operation, which includes spatial dimensional operations, allowing it to capture interactions between spatial high-dimensional features and thereby enhance the representation of key features. SGU first splits the input data Z along the channel dimension into two parts (<inline-formula>
<mml:math display="inline" id="im21"><mml:mrow><mml:msub><mml:mi>Z</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> and <inline-formula>
<mml:math display="inline" id="im22"><mml:mrow><mml:msub><mml:mi>Z</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula>), where <inline-formula>
<mml:math display="inline" id="im23"><mml:mrow><mml:msub><mml:mi>Z</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> is linearly mapped to the spatial domain, and the output is then obtained by performing a dot product with <inline-formula>
<mml:math display="inline" id="im24"><mml:mrow><mml:msub><mml:mi>Z</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula>, as shown in <xref ref-type="disp-formula" rid="eq9">Equation 9</xref>:</p>
<disp-formula id="eq9"><label>(9)</label>
<mml:math display="block" id="M9"><mml:mrow><mml:mi>s</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mi>Z</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>Z</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:mi>W</mml:mi><mml:mo>+</mml:mo><mml:mi>b</mml:mi><mml:mo stretchy="false">)</mml:mo><mml:mo>&#x2297;</mml:mo><mml:msub><mml:mi>Z</mml:mi><mml:mn>2</mml:mn></mml:msub></mml:mrow></mml:math>
</disp-formula>
<p>Here, <inline-formula>
<mml:math display="inline" id="im25"><mml:mrow><mml:mi>W</mml:mi><mml:mo>&#x2208;</mml:mo><mml:msup><mml:mi>&#x211d;</mml:mi><mml:mrow><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>f</mml:mi><mml:mi>e</mml:mi><mml:mi>a</mml:mi><mml:mi>t</mml:mi><mml:mi>u</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub><mml:mo>&#xd7;</mml:mo><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>f</mml:mi><mml:mi>e</mml:mi><mml:mi>a</mml:mi><mml:mi>t</mml:mi><mml:mi>u</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:msup></mml:mrow></mml:math></inline-formula>. To ensure training stability, <inline-formula>
<mml:math display="inline" id="im26"><mml:mi>W</mml:mi></mml:math></inline-formula> is initialized to near-zero values, and b is initialized to 1. This initialization ensures that each gMLP block behaves like a regular linear layer during the early stages of training, where each feature is processed independently, and the model gradually learns the interactions between features during the learning process.</p>
</sec>
<sec id="s2_4_3">
<label>2.4.3</label>
<title>Feature aggregation module</title>
<p>For the output of the gMLP module, average pooling and a linear layer are used to reduce the dimensionality and integrate the high-dimensional features, mapping them to the final output space. Finally, the mean squared error loss function is employed to compute the error between the model&#x2019;s output and the anxiety level indicator. The average pooling operation averages the features along the spatial dimension, generating a scalar value for each channel, thereby reducing the feature dimensionality. This helps capture the overall trend of the features and extract more representative ones. The computation of this module is given by <xref ref-type="disp-formula" rid="eq10">Equation 10</xref>:</p>
<disp-formula id="eq10"><label>(10)</label>
<mml:math display="block" id="M10"><mml:mrow><mml:mi>o</mml:mi><mml:mi>u</mml:mi><mml:mi>t</mml:mi><mml:mo>=</mml:mo><mml:mi>A</mml:mi><mml:mi>v</mml:mi><mml:mi>e</mml:mi><mml:mi>P</mml:mi><mml:mi>o</mml:mi><mml:mi>o</mml:mi><mml:mi>l</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mo>&#xa0;</mml:mo><mml:mi>a</mml:mi><mml:mi>x</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mo>=</mml:mo><mml:mo>&#x201c;</mml:mo><mml:mtext>spatial&#x201d;</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#xd7;</mml:mo><mml:mi>W</mml:mi><mml:mo>+</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:math>
</disp-formula>
<p>Among them, <inline-formula>
<mml:math display="inline" id="im27"><mml:mrow><mml:mi>W</mml:mi><mml:mo>&#x2208;</mml:mo><mml:msup><mml:mi>&#x211d;</mml:mi><mml:mrow><mml:msub><mml:mi>d</mml:mi><mml:mrow><mml:mi>c</mml:mi><mml:mi>h</mml:mi><mml:mi>a</mml:mi><mml:mi>n</mml:mi><mml:mi>n</mml:mi><mml:mi>e</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:msub><mml:mo>&#xd7;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:math></inline-formula>, <inline-formula>
<mml:math display="inline" id="im28"><mml:mrow><mml:mi>A</mml:mi><mml:mi>v</mml:mi><mml:mi>e</mml:mi><mml:mi>P</mml:mi><mml:mi>o</mml:mi><mml:mi>o</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:math></inline-formula> represents average pooling, <inline-formula>
<mml:math display="inline" id="im29"><mml:mi>W</mml:mi></mml:math></inline-formula> is the weight, <inline-formula>
<mml:math display="inline" id="im30"><mml:mi>b</mml:mi></mml:math></inline-formula> is the bias.</p>
</sec>
</sec>
<sec id="s2_5">
<label>2.5</label>
<title>Regression models</title>
<p>To compare the performance of the Conv_gMLP model, several ML and DL models were selected for this regression task. For all models, the n&#xd7;480 PLI features were used as input. For n samples, each containing 480 PLI features, 80% of the data was used for training, and 20% was used for testing. The predictions were compared to the true values using the HAM-A score, and the MAE was calculated to assess the performance.</p>
<sec id="s2_5_1">
<label>2.5.1</label>
<title>ML models</title>
<p>Tree-based models, such as decision trees, have been widely used in disease detection and have shown excellent performance. In this study, LightGBM (Light Gradient Boosting Machine), XGBoost (eXtreme Gradient Boosting), and CatBoost (Categorical Boosting) were employed for the quantitative assessment of anxiety severity in patients.</p>
<p>1. CatBoost is an improved version of the Gradient Boosting Decision Tree (GBDT) algorithm, specifically designed for classification or regression tasks involving categorical features. It effectively addresses the imbalance and sparsity of categorical features by introducing an adaptive learning rate technique based on symmetric decision trees. In each iteration, the gradient boosting algorithm is used to construct decision trees, optimizing model performance by minimizing the loss function. Compared to traditional GBDT, CatBoost automatically handles the encoding of categorical features, eliminating the need for complex manual processing. Additionally, it offers automatic hyperparameter tuning, missing value handling, and a range of performance optimization strategies, such as random data sampling and feature importance evaluation.</p>
<p>2. XGBoost is a learning algorithm based on GBDT and is widely used in ML and data mining. It employs decision trees as base learners and constructs a powerful ensemble model by iteratively optimizing the gradient of the loss function. The underlying mechanism can be summarized as follows: during the construction of each tree, the gradient boosting algorithm is utilized to minimize the loss function, while a regularization term is introduced to control model complexity and prevent overfitting. Additionally, XGBoost incorporates a customized optimization strategy known as the &#x201c;approximate greedy algorithm,&#x201d; which efficiently leverages second-order gradient information of features to accelerate the training process. Ultimately, the final prediction output is obtained by aggregating the predictions of all weak learners.</p>
<p>3. LightGBM is an efficient ML algorithm based on GBDT, specifically optimized for large-scale datasets and high-dimensional features. It employs a &#x201c;histogram-based decision tree&#x201d; algorithm, which accelerates the training process by discretizing feature values and constructing histograms, thereby reducing memory consumption and computational complexity. In each iteration, the gradient boosting algorithm is used to construct decision trees, optimizing model performance by minimizing the loss function. Compared to traditional GBDT, LightGBM introduces a leaf-wise optimal split algorithm that precisely selects the best split points, further enhancing model accuracy. Additionally, it supports parallelized training and inference and provides a comprehensive set of hyperparameter tuning options and feature importance evaluation methods.</p>
</sec>
<sec id="s2_5_2">
<label>2.5.2</label>
<title>DL models</title>
<p>Several DL models have also been employed for tabular data classification and compared with the performance of Conv_gMLP, including MLP and one-dimensional convolutional neural networks (1D CNN).</p>
<p>1. MLP is a feedforward artificial neural network model composed of multiple layers of neurons. Each neuron performs a nonlinear transformation of the input data using an activation function, with the Rectified Linear Unit (ReLU) function employed in this study. The network is trained through the backpropagation algorithm, iteratively updating parameters layer by layer to minimize the loss function. MLP exhibits strong fitting capabilities and nonlinear modeling potential, making it well-suited for capturing complex input-output relationships.</p>
<p>2. 1D CNN is a model based on Convolutional Neural Network (CNN), specifically designed for processing sequential data, such as time series or tabular data. It extracts features by applying one-dimensional convolution operations to the input data and reduces feature dimensionality through pooling operations. By stacking multiple convolution and pooling layers, the model progressively learns higher-level abstract features. Finally, classification is performed through fully connected layers and activation functions. The advantage of 1D CNN lies in its ability to effectively capture local patterns in data while maintaining parameter sharing and translation invariance. To enhance the model&#x2019;s representational capacity, two linear modules, each consisting of a linear layer followed by a ReLU activation function, were added to the output layer.</p>
</sec>
</sec>
<sec id="s2_6">
<label>2.6</label>
<title>Parameters optimization</title>
<p>ML and DL models contain numerous hyperparameters, and different combinations of these parameters yield varying training outcomes. Since hyperparameter selection significantly impacts model performance and generalization ability, optimizing these parameters helps mitigate the risks of overfitting or underfitting, thereby enhancing predictive accuracy and robustness. This study employed the Tree-structured Parzen Estimator (TPE) algorithm to search for the optimal hyperparameter configuration and improve model performance. Compared with traditional grid search or random search methods, TPE leverages historical data to accelerate the optimization process, offering lower time complexity and efficiently identifying superior hyperparameter sets in less time.</p>
<p>The TPE algorithm, implemented in the Hyperopt Python library, is a powerful method for hyperparameter optimization, comprising three key steps to ensure an effective and practical process. First, defining the objective function is fundamental to the TPE algorithm. The optimization target in this study is the MAE between the model&#x2019;s predictions on the test set and the ground truth. Second, defining the hyperparameter search space is crucial. The hyperparameter search space used in this study is detailed in <xref ref-type="table" rid="T3"><bold>Tables&#xa0;3</bold></xref>&#x2013;<xref ref-type="table" rid="T8"><bold>8</bold></xref>, with the selection of ML optimization parameters and their respective ranges based on the AutoGluon automated ML framework. Finally, setting the number of search iterations controls the optimization process, with a maximum of 30 iterations specified. Through the systematic integration of these three steps, the TPE algorithm efficiently explores the hyperparameter space, enhancing both the performance and generalization capability of ML and DL models.</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>LightGBM optimization variables and ranges.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Parameter</th>
<th valign="middle" align="center">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">num_leaves</td>
<td valign="middle" align="center">uniformint[16,96]</td>
</tr>
<tr>
<td valign="middle" align="center">min_data_in_leaf</td>
<td valign="middle" align="center">uniformint[2,60]</td>
</tr>
<tr>
<td valign="middle" align="center">feature_fraction</td>
<td valign="middle" align="center">uniform[0.75,1]</td>
</tr>
<tr>
<td valign="middle" align="center">learning_rate</td>
<td valign="middle" align="center">loguniform[log(5e-3),log(0.1)]</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="T4" position="float">
<label>Table&#xa0;4</label>
<caption>
<p>XGBoost optimization variables and ranges.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Parameter</th>
<th valign="middle" align="center">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">Learning rate</td>
<td valign="middle" align="center">loguniform[log(5e-3),log(0.1)]</td>
</tr>
<tr>
<td valign="middle" align="center">depth</td>
<td valign="middle" align="center">uniformint[3,10]</td>
</tr>
<tr>
<td valign="middle" align="center">min_child_weight</td>
<td valign="middle" align="center">uniformint[1,5]</td>
</tr>
<tr>
<td valign="middle" align="center">colsample_bytree</td>
<td valign="middle" align="center">uniform[0.5,1.0]</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="T5" position="float">
<label>Table&#xa0;5</label>
<caption>
<p>Catboost optimization variables and ranges.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Parameter</th>
<th valign="middle" align="center">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">max_depth</td>
<td valign="middle" align="center">uniformint[5,8]</td>
</tr>
<tr>
<td valign="middle" align="center">l2_leaf_reg</td>
<td valign="middle" align="center">uniform[1,5]</td>
</tr>
<tr>
<td valign="middle" align="center">learning_rate</td>
<td valign="middle" align="center">loguniform[log(5e-3),log(0.1)]</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="T6" position="float">
<label>Table&#xa0;6</label>
<caption>
<p>MLP optimization variables and ranges.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Parameter</th>
<th valign="middle" align="center">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">num_layers</td>
<td valign="middle" align="center">uniformint[1,2,3]</td>
</tr>
<tr>
<td valign="middle" align="center">num_ffn</td>
<td valign="middle" align="center">uniformint[128,256,384,512]</td>
</tr>
<tr>
<td valign="middle" align="center">batch_size</td>
<td valign="middle" align="center">uniformint[64,96,128,160]</td>
</tr>
<tr>
<td valign="middle" align="center">learning_rate</td>
<td valign="middle" align="center">loguniform[log(5e-5),log(1e-3)]</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="T7" position="float">
<label>Table&#xa0;7</label>
<caption>
<p>Optimization variables and ranges of gMLP.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Parameter</th>
<th valign="middle" align="center">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">d_channel</td>
<td valign="middle" align="center">uniformint[32,64,128,256]</td>
</tr>
<tr>
<td valign="middle" align="center">num_layers</td>
<td valign="middle" align="center">uniformint[1,2,4,6,8]</td>
</tr>
<tr>
<td valign="middle" align="center">num_ffn</td>
<td valign="middle" align="center">uniformint[128,256,384,512,640,768]</td>
</tr>
<tr>
<td valign="middle" align="center">batch_size</td>
<td valign="middle" align="center">uniformint[64,96,128,160]</td>
</tr>
<tr>
<td valign="middle" align="center">learning_rate</td>
<td valign="middle" align="center">loguniform[log(5e-5),log(1e-3)]</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="T8" position="float">
<label>Table&#xa0;8</label>
<caption>
<p>Optimization variables and ranges of 1D CNN.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Parameter</th>
<th valign="middle" align="center">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">batch_size</td>
<td valign="middle" align="center">uniformint[64,96,128,160]</td>
</tr>
<tr>
<td valign="middle" align="center">learning_rate</td>
<td valign="middle" align="center">loguniform[log(5e-5),log(1e-3)]</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s2_7">
<label>2.7</label>
<title>Evaluation metrics</title>
<p>In this study, repeated 5-fold cross-validation was employed to reduce estimation bias and improve the robustness of model evaluation. For ML models, four subsets of the data were used for training, while the remaining subset was used for testing to evaluate model performance. For DL models, 10% of the training data was further randomly selected for validation to prevent overfitting. All models underwent three rounds of repeated 5-fold cross-validation, and the final results were obtained by averaging the performance across all test sets. Model performance was assessed using the MAE, where a lower MAE indicates better model performance. The calculation formula is provided in <xref ref-type="disp-formula" rid="eq11">Equation 11</xref>:</p>
<disp-formula id="eq11"><label>(11)</label>
<mml:math display="block" id="M11"><mml:mrow><mml:mi>M</mml:mi><mml:mi>A</mml:mi><mml:mi>E</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>n</mml:mi></mml:mfrac><mml:mstyle displaystyle="true"><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover></mml:mstyle><mml:mrow><mml:mo>|</mml:mo><mml:mrow><mml:msub><mml:mover accent="true"><mml:mi>y</mml:mi><mml:mo>^</mml:mo></mml:mover><mml:mi>i</mml:mi></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow><mml:mo>|</mml:mo></mml:mrow></mml:mrow></mml:math>
</disp-formula>
</sec>
<sec id="s2_8">
<label>2.8</label>
<title>Grad-CAM visualization</title>
<p>Model interpretability has long been a critical research focus in the fields of artificial intelligence and ML. In this study, Gradient-weighted Regression Activation Mapping (Grad-RAM) is utilized to interpret the decision-making process of DL models. Grad-RAM builds upon Class Activation Mapping (CAM) and Gradient-weighted Class Activation Mapping (Grad-CAM), offering an advanced visualization technique.</p>
<p>CAM generates class-specific activation heatmaps by leveraging the feature maps from the last convolutional layer of a CNN with weights derived from the global average pooling layer, thereby revealing the key activation regions in the input signals. However, CAM struggles to capture detailed spatial information within feature maps. Grad-CAM addresses this limitation by incorporating gradient information to weight the heatmap, thereby improving the precision of interpretability and deepening insights into model decisions. This enables clear visualization of the regions critical to the model&#x2019;s classification decisions. Consequently, Grad-CAM significantly enhances model interpretability, fostering a better understanding of DL model decision-making and increasing their reliability. Its mathematical formulation is detailed in <xref ref-type="disp-formula" rid="eq12">Equations 12</xref>, <xref ref-type="disp-formula" rid="eq13">13</xref>:</p>
<disp-formula id="eq12"><label>(12)</label>
<mml:math display="block" id="M12"><mml:mrow><mml:msubsup><mml:mi>&#x3b1;</mml:mi><mml:mi>k</mml:mi><mml:mi>c</mml:mi></mml:msubsup><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>Z</mml:mi></mml:mfrac><mml:munder><mml:mstyle displaystyle="true"><mml:mo>&#x2211;</mml:mo></mml:mstyle><mml:mi>i</mml:mi></mml:munder><mml:munder><mml:mstyle displaystyle="true"><mml:mo>&#x2211;</mml:mo></mml:mstyle><mml:mi>j</mml:mi></mml:munder><mml:mfrac><mml:mrow><mml:mo>&#x2202;</mml:mo><mml:msup><mml:mi>y</mml:mi><mml:mi>c</mml:mi></mml:msup></mml:mrow><mml:mrow><mml:mo>&#x2202;</mml:mo><mml:msubsup><mml:mi>A</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow><mml:mi>k</mml:mi></mml:msubsup></mml:mrow></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<p>Here, <inline-formula>
<mml:math display="inline" id="im31"><mml:mrow><mml:msup><mml:mi>y</mml:mi><mml:mi>c</mml:mi></mml:msup></mml:mrow></mml:math></inline-formula> represents the score predicted by the network for class c (before applying softmax activation), <inline-formula>
<mml:math display="inline" id="im32"><mml:mrow><mml:msubsup><mml:mi>A</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow><mml:mi>k</mml:mi></mml:msubsup></mml:mrow></mml:math></inline-formula> denotes the data at position (i, j) in channel k of the feature layer A, and <inline-formula>
<mml:math display="inline" id="im33"><mml:mi>Z</mml:mi></mml:math></inline-formula> refers to the product of the width and height of the feature layer.</p>
<disp-formula id="eq13"><label>(13)</label>
<mml:math display="block" id="M13"><mml:mrow><mml:msubsup><mml:mi>L</mml:mi><mml:mrow><mml:mi>G</mml:mi><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>d</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>C</mml:mi><mml:mi>A</mml:mi><mml:mi>M</mml:mi></mml:mrow><mml:mi>c</mml:mi></mml:msubsup><mml:mo>=</mml:mo><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:mi>L</mml:mi><mml:mi>U</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:munder><mml:mstyle displaystyle="true"><mml:mo>&#x2211;</mml:mo></mml:mstyle><mml:mi>k</mml:mi></mml:munder><mml:msubsup><mml:mi>&#x3b1;</mml:mi><mml:mi>k</mml:mi><mml:mi>c</mml:mi></mml:msubsup><mml:msup><mml:mi>A</mml:mi><mml:mi>k</mml:mi></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<p>Here, A represents a feature layer (the output of the last convolutional layer is taken in this study), k denotes the k-th channel in feature layer A, c represents the target class, <inline-formula>
<mml:math display="inline" id="im34"><mml:mrow><mml:msup><mml:mi>A</mml:mi><mml:mi>k</mml:mi></mml:msup></mml:mrow></mml:math></inline-formula> refers to the data in the k-th channel of feature layer A, and <inline-formula>
<mml:math display="inline" id="im35"><mml:mrow><mml:msubsup><mml:mi>&#x3b1;</mml:mi><mml:mi>k</mml:mi><mml:mi>c</mml:mi></mml:msubsup></mml:mrow></mml:math></inline-formula> represents the weight of the k-th channel in feature layer A for class c.</p>
<p>Grad-RAM is an improved version of Grad-CAM designed for the interpretability of regression tasks. For regression models, the gradient is defined as the derivative of the inverse of the prediction bias, where a smaller bias leads to a larger gradient. Specifically, <inline-formula>
<mml:math display="inline" id="im36"><mml:mrow><mml:msubsup><mml:mi>L</mml:mi><mml:mrow><mml:mtext>Grad-RAM</mml:mtext></mml:mrow><mml:mi>e</mml:mi></mml:msubsup></mml:mrow></mml:math></inline-formula> is derived from <xref ref-type="disp-formula" rid="eq14">Equation 14</xref>, with the weight of each channel <inline-formula>
<mml:math display="inline" id="im37"><mml:mrow><mml:msubsup><mml:mi>&#x3b1;</mml:mi><mml:mi>k</mml:mi><mml:mi>e</mml:mi></mml:msubsup></mml:mrow></mml:math></inline-formula> calculated using <xref ref-type="disp-formula" rid="eq15">Equation 15</xref>. The probability <inline-formula>
<mml:math display="inline" id="im38"><mml:mrow><mml:msup><mml:mi>y</mml:mi><mml:mi>c</mml:mi></mml:msup></mml:mrow></mml:math></inline-formula> in Grad-CAM is replaced by <inline-formula>
<mml:math display="inline" id="im39"><mml:mrow><mml:msup><mml:mi>y</mml:mi><mml:mi>e</mml:mi></mml:msup></mml:mrow></mml:math></inline-formula>, which is computed from <xref ref-type="disp-formula" rid="eq16">Equation 16</xref>, where &#x177; is the predicted value and y is the target value. <inline-formula>
<mml:math display="inline" id="im40"><mml:mrow><mml:msubsup><mml:mi>L</mml:mi><mml:mrow><mml:mtext>Grad-RAM</mml:mtext></mml:mrow><mml:mi>e</mml:mi></mml:msubsup></mml:mrow></mml:math></inline-formula> changes according to the variation in the error bias.</p>
<disp-formula id="eq14"><label>(14)</label>
<mml:math display="block" id="M14"><mml:mrow><mml:msubsup><mml:mi>L</mml:mi><mml:mrow><mml:mtext>Grad-RAM</mml:mtext></mml:mrow><mml:mi>e</mml:mi></mml:msubsup><mml:mo>=</mml:mo><mml:mi>Re</mml:mi><mml:mi>L</mml:mi><mml:mi>U</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:munder><mml:mstyle displaystyle="true"><mml:mo>&#x2211;</mml:mo></mml:mstyle><mml:mi>k</mml:mi></mml:munder><mml:msubsup><mml:mi>&#x3b1;</mml:mi><mml:mi>k</mml:mi><mml:mi>e</mml:mi></mml:msubsup><mml:msup><mml:mi>A</mml:mi><mml:mi>k</mml:mi></mml:msup></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq15"><label>(15)</label>
<mml:math display="block" id="M15"><mml:mrow><mml:msubsup><mml:mi>&#x3b1;</mml:mi><mml:mi>k</mml:mi><mml:mi>e</mml:mi></mml:msubsup><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mi>Z</mml:mi></mml:mfrac><mml:munder><mml:mstyle displaystyle="true"><mml:mo>&#x2211;</mml:mo></mml:mstyle><mml:mi>i</mml:mi></mml:munder><mml:munder><mml:mstyle displaystyle="true"><mml:mo>&#x2211;</mml:mo></mml:mstyle><mml:mi>j</mml:mi></mml:munder><mml:mfrac><mml:mrow><mml:mo>&#x2202;</mml:mo><mml:msup><mml:mi>y</mml:mi><mml:mi>e</mml:mi></mml:msup></mml:mrow><mml:mrow><mml:mo>&#x2202;</mml:mo><mml:msubsup><mml:mi>A</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow><mml:mi>k</mml:mi></mml:msubsup></mml:mrow></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
<disp-formula id="eq16"><label>(16)</label>
<mml:math display="block" id="M16"><mml:mrow><mml:msup><mml:mi>y</mml:mi><mml:mi>e</mml:mi></mml:msup><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:msqrt><mml:mrow><mml:msup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mover><mml:mi>y</mml:mi><mml:mo>&#x2c6;</mml:mo></mml:mover><mml:mo>&#x2212;</mml:mo><mml:mi>y</mml:mi><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:msqrt><mml:mo>+</mml:mo><mml:msup><mml:mn>10</mml:mn><mml:mrow><mml:mo>&#x2212;</mml:mo><mml:mn>9</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mfrac></mml:mrow></mml:math>
</disp-formula>
</sec>
</sec>
<sec id="s3" sec-type="results">
<label>3</label>
<title>Results</title>
<p>This study explored the effect of different time window lengths on predictive performance, with the detailed results presented in <xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2</bold></xref>. The figure illustrates the regression performance of various models across different time windows. It was observed that for both traditional ML models (LightGBM, XGBoost, CatBoost) and DL models (1D CNN, MLP, Conv_gMLP), model performance gradually improved as the time window increased from 2 s to 10 s. However, beyond 10 s, DL models exhibited a decline in performance, whereas traditional ML models showed only marginal improvement. Notably, the proposed Conv_gMLP model achieved significantly lower prediction errors than all other models. The optimal predictive performance was obtained at a 10-second time window, yielding the lowest error of 0.32 &#xb1; 0.07, which was substantially superior to other models (LightGBM: 4.24 &#xb1; 0.08, XGBoost: 3.97 &#xb1; 0.09, CatBoost: 3.87 &#xb1; 0.09, 1D CNN: 2.65 &#xb1; 0.23, MLP: 1.16 &#xb1; 0.17).</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Regression performance of different time windows and different models.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyt-17-1764932-g002.tif">
<alt-text content-type="machine-generated">Line graph comparing mean absolute error against time windows for six machine learning models: Lightgbm, xgboost, catboost, 1d_cnn, MLP, qEEG_gMLP. qEEG_gMLP shows consistently lowest error, followed by MLP. The x-axis ranges from 0 to 20, and the y-axis ranges from 0 to 7. Each model is distinguished by different line styles and colors, with a legend included for reference.</alt-text>
</graphic></fig>
<p>Ablation experiments were conducted to explore the impact of different gMLP architectures on model performance. Conv_gMLP (SC) applies the SGU before the CP, allowing early spatial feature selection but potentially limiting subsequent channel interactions. In contrast, Conv_gMLP(CS) first employs CP to enhance feature representation across channels, followed by SGU to refine spatial dependencies, leading to more effective feature extraction. The experimental results are presented in <xref ref-type="table" rid="T9"><bold>Table&#xa0;9</bold></xref>. As shown in <xref ref-type="table" rid="T9"><bold>Table&#xa0;9</bold></xref>, each component of Conv_gMLP and its corresponding placement play a crucial role. In summary, the proposed Conv_gMLP model consistently outperformed other models across different time windows, demonstrating its significant advantage in evaluating the GAD dataset used in this study. This superiority stems from the model&#x2019;s unique architectural design and the inherent strengths of the gMLP framework in handling nonlinear relationships and extracting complex features.</p>
<table-wrap id="T9" position="float">
<label>Table&#xa0;9</label>
<caption>
<p>Ablation experiment results.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Model</th>
<th valign="middle" align="center">MAE</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">MLP</td>
<td valign="middle" align="center">1.15 &#xb1; 0.12</td>
</tr>
<tr>
<td valign="middle" align="center">Conv_gMLP (SC)</td>
<td valign="middle" align="center">0.74 &#xb1; 0.14</td>
</tr>
<tr>
<td valign="middle" align="center">Conv_gMLP (CS)</td>
<td valign="middle" align="center">0.32 &#xb1; 0.07</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Furthermore, this study explored the effect of different normalization methods on predictive performance within a 10-second time window, comparing LayerNorm, BatchNorm, and no normalization (Non). Detailed results are presented in <xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3</bold></xref>. LayerNorm yielded the highest error at 1.01 &#xb1; 0.24, indicating suboptimal performance. In contrast, BatchNorm significantly reduced both error and standard deviation to 0.32 &#xb1; 0.07, while the error without any normalization was 0.53 &#xb1; 0.24. Based on these findings, BatchNorm was selected as the preferred normalization method due to its superior performance in minimizing error. Among the tested methods, LayerNorm exhibited the worst performance, yielding the highest error. This may be attributed to the fact that LayerNorm normalizes each sample independently at every layer of the neural network. For quantified EEG feature data, it considers only the statistical properties of individual samples while ignoring inter-sample statistics. The high complexity of the experimental data may have hindered LayerNorm&#x2019;s ability to effectively normalize the features, ultimately impairing model performance. In comparison, BatchNorm demonstrated the best performance in this study, significantly reducing both the error and standard deviation. This improvement is likely due to BatchNorm&#x2019;s ability to account for the statistical properties of mini-batches, allowing it to better capture the global characteristics of EEG feature data. Moreover, BatchNorm&#x2019;s inherent regularization properties help mitigate overfitting, enhancing the model&#x2019;s generalization capability.</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>Different normalization methods in Conv_gMLP.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyt-17-1764932-g003.tif">
<alt-text content-type="machine-generated">Bar chart showing mean absolute error with error bars for three categories: LayerNorm (blue, approximately 1.0), BatchNorm (orange, approximately 0.4), and Non (green, approximately 0.6).</alt-text>
</graphic></fig>
<p>Hyperparameter optimization of the 10-second Conv_gMLP model under BatchNorm settings was performed using the TPE optimization algorithm in this study. <xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4</bold></xref> illustrates the parameter search space. The results showed that the optimal fitting performance was achieved when the d_channel was set to 64, the num_layers was 8, the feedforward network dimension (d_ffn) was 256, the batch size was 96, and the learning rate was 0.00055, yielding a minimum MAE of 0.325. This finding indicated that under this specific combination of hyperparameters, the model effectively captured underlying data patterns and achieved lower&#xa0;prediction errors. Compared to other hyperparameter configurations, this combination optimally balanced model complexity and training stability, thereby preventing overfitting and enhancing generalization capability.</p>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Performance of parameter combination optimized by TPE method.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyt-17-1764932-g004.tif">
<alt-text content-type="machine-generated">Five scatter plots compare mean absolute error against various hyperparameters: d_channel, num_layers, d_ffn, batch_size, and learning_rate. Each plot shows varying distributions, indicating how these parameters influence error rates.</alt-text>
</graphic></fig>
<p>Finally, the Grad-RAM method was utilized to compute the feature weights, which were subsequently ranked from highest to lowest. The results, presented in <xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5a</bold></xref>, illustrate the distribution of feature importance across different brain regions. The analysis revealed that the frontal lobe exhibited the highest feature weight among all brain regions, followed by the temporal lobe, underscoring the critical role of these regions in the brain functional activity of GAD patients. Moreover, <xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5b</bold></xref> depicts the distribution of rhythmic features across brain regions revealing that, although the boundaries of importance among various rhythmic features were somewhat ambiguous, the feature weight of the beta rhythm was notably higher. Further analysis indicated that GAD patients with different severity levels displayed distinct abnormalities in brain FC strength and the number of connections. These differences could serve as potential biomarkers for differentiating the severity of GAD. Thus, the EEG-based feature weight analysis provides a novel quantitative basis for the diagnosis of GAD and estimation of its severity.</p>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p><bold>(a)</bold> Distribution of feature weights across different brain regions; <bold>(b)</bold> Distribution of importance across different EEG rhythm features.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyt-17-1764932-g005.tif">
<alt-text content-type="machine-generated">Two line graphs labeled (a) and (b). Graph (a) shows five brain areas: Frontal, Central, Parietal, Occipital, and Temporal, with Frontal having the most edges and Parietal the least. Graph (b) displays edges per rhythm for Theta, Alpha1, Alpha2, and Beta, all lines closely aligning. Both graphs plot the number of edges versus selected edges number.</alt-text>
</graphic></fig>
</sec>
<sec id="s4" sec-type="discussion">
<label>4</label>
<title>Discussion</title>
<p>This study introduces an innovative approach to assessing GAD severity by applying the Conv_gMLP model to resting-state EEG signal processing. The main findings of the study are as follows: First, the Conv_gMLP model demonstrated significantly lower prediction MAE compared to other models, highlighting the effectiveness of its architectural design and feature extraction strategy. Second, FC analysis revealed that the frontal and temporal lobes played a crucial role in distinguishing GAD severity, with beta rhythms emerging as the most informative feature. These findings underscore the importance of both the model&#x2019;s predictive capability and the neurophysiological insights derived from EEG-based analysis, which will be further elaborated in the following sections.</p>
<sec id="s4_1">
<label>4.1</label>
<title>Time-window optimization and architectural innovation of Conv_gMLP: enhancing precision in GAD severity assessment</title>
<p>This study demonstrates that the Conv_gMLP model significantly outperforms other models in predicting the severity of GAD, owing to its innovative architectural design and optimized feature extraction strategy. Two key factors drive its superior performance: the optimization of the time window and the innovative structure of the Conv_gMLP model.</p>
<p>First, the selection of the optimal time window is critical for enhancing model performance. Previous studies have indicated that the selection of sliding time windows influences the quantification of brain network stability and detection performance, suggesting that optimizing this parameter can enhance predictive accuracy (<xref ref-type="bibr" rid="B41">41</xref>). Furthermore, it has been confirmed that a time window of approximately 10 s is most suitable for automatic EEG feature extraction and emotion state recognition, effectively capturing emotion-related oscillatory characteristics (<xref ref-type="bibr" rid="B42">42</xref>). This duration aligns with the principles of neurophysiology, as EEG signals can capture dynamic neural network activities. For instance, the beta rhythm oscillations, which are highly sensitive to anxiety levels (<xref ref-type="bibr" rid="B43">43</xref>), reflect the heightened brain alertness and arousal in GAD patients (<xref ref-type="bibr" rid="B44">44</xref>). Moreover, human emotional states, including anxiety, typically last for 10 s or longer (<xref ref-type="bibr" rid="B45">45</xref>). By systematically varying the sliding-window length, a 10-s epoch yielded optimal performance, with the ensemble learner attaining 98.1% accuracy for GAD-severity grading under this temporal window (<xref ref-type="bibr" rid="B24">24</xref>). Thus, the 10-second window effectively captures these oscillatory features, providing a biologically grounded explanation for the model&#x2019;s superior performance.</p>
<p>Second, the Conv_gMLP architecture leverages neurobiological insights to improve GAD severity prediction. Anxiety disorders are associated with aberrant large-scale functional network patterns, characterized by enhanced connectivity between the insula and thalamus and reduced activity within the frontoparietal network&#x2014;patterns that provide insight into the underlying pathophysiological changes in anxiety disorders (<xref ref-type="bibr" rid="B46">46</xref>). Notably, Chu et&#xa0;al. observed that GAD patients exhibit significantly stronger high-beta-band FC between brain regions than HC, and that this hyper-connectivity becomes more pronounced with longer illness duration (<xref ref-type="bibr" rid="B47">47</xref>). Functional networks are collections of brain regions with closely coordinated activity during both resting-state and cognitive tasks, each supporting distinct aspects of cognition. The Conv_gMLP model integrates quantitative EEG analysis with the gMLP framework (<xref ref-type="bibr" rid="B40">40</xref>), enabling precise identification of EEG activity patterns in GAD patients by extracting PLI features from EEG signals. This design reflects a deep understanding of both the data structure and the underlying neural mechanisms. In conclusion, the Conv_gMLP model successfully predicts the severity of GAD, owing to its optimized time window selection and innovative architectural design. These aspects not only enhance the model&#x2019;s technical performance but also strengthen its alignment with the neurophysiological basis of anxiety. Future research should focus on refining this model and exploring its potential in clinical applications.</p>
</sec>
<sec id="s4_2">
<label>4.2</label>
<title>Key factors in GAD severity evaluation: brain regions, beta rhythms, and FC</title>
<p>This study investigates the neurophysiological underpinnings of GAD severity, emphasizing the roles of brain regions, beta rhythms, and FC in characterizing the condition. Persistent excessive worry in GAD is strongly associated with structural and functional brain alterations (<xref ref-type="bibr" rid="B48">48</xref>), and identifying these functional changes may uncover disruptions in neural circuits and emotional regulation. Using the Grad-RAM method for feature weight analysis, this study suggests that severity-relevant features are primarily concentrated in the frontal and temporal lobes. Mechanistically, the prominence of frontal and temporal regions may reflect the involvement of large-scale networks supporting cognitive control, affective processing, and their interaction, which are broadly relevant to anxiety symptom expression. Studies indicate that the frontal cortex plays a crucial role in emotional cognition (<xref ref-type="bibr" rid="B14">14</xref>, <xref ref-type="bibr" rid="B49">49</xref>). FC analysis further confirms aberrant prefrontal network connectivity in anxiety disorders (<xref ref-type="bibr" rid="B46">46</xref>) and highlights abnormalities in the temporal region of patients with GAD (<xref ref-type="bibr" rid="B25">25</xref>). Further studies reveal that most brain regions in GAD patients exhibit elevated correlation dimension (D2) values, particularly in the left prefrontal and right temporal lobes (<xref ref-type="bibr" rid="B50">50</xref>). Because correlation dimension is a nonlinear measure reflecting the complexity of EEG dynamics and information processing, such elevations in these regions may indicate altered cortical dynamics associated with greater anxiety severity. This finding further reinforces that key brain signals for assessing GAD severity primarily originate from the prefrontal and temporal lobes, consistent with our results.</p>
<p>Moreover, EEG rhythms encode extensive neural activity information and have been widely applied in both research and clinical assessments of GAD (<xref ref-type="bibr" rid="B51">51</xref>, <xref ref-type="bibr" rid="B52">52</xref>). The EEG features of beta rhythms are significantly correlated with brain activity levels, with increased beta-band activity closely associated with anxiety symptoms (<xref ref-type="bibr" rid="B52">52</xref>, <xref ref-type="bibr" rid="B53">53</xref>). Multiple studies have demonstrated that beta rhythm power is significantly elevated in GAD patients (<xref ref-type="bibr" rid="B53">53</xref>&#x2013;<xref ref-type="bibr" rid="B55">55</xref>), strongly associated with brain alertness and arousal states (<xref ref-type="bibr" rid="B56">56</xref>), and more easily detectable under anxiety conditions (<xref ref-type="bibr" rid="B57">57</xref>). Mechanistically, heightened beta power may reflect persistently increased arousal and reduced network flexibility, which could make beta-band FC particularly informative for continuous severity estimation. Beta rhythm EEG features have been quantitatively utilized in GAD assessment. Recent evidence demonstrates significantly elevated beta power together with aberrant long-range fronto-temporal FC in patients relative to HC (<xref ref-type="bibr" rid="B43">43</xref>). This convergence may reflect stronger long-range coupling that supports sustained arousal, which could be relevant to severity estimation. Our study identifies beta rhythms as the most critical among EEG rhythm features, further confirming their value in evaluating GAD severity and providing a plausible biological basis for the observed predictive contributions.</p>
<p>Finally, the study suggests that EEG-based FC may serve as a valuable biomarker for distinguishing individuals with GAD (<xref ref-type="bibr" rid="B47">47</xref>). FC reflects the coordination and interactions among different brain regions, with alterations observable even in unconscious states. Variations in FC indicate alterations in brain region coordination and cognitive function, capturing specific traits of GAD (<xref ref-type="bibr" rid="B58">58</xref>). These connectivity disruptions are especially pronounced in GAD, where frontal-temporal connectivity strength is strongly associated with cognitive function. Overall, this study confirms the impact of brain regions and beta rhythms on GAD severity. It also underscores the importance of FC in brain coordination and cognition, suggesting its potential as a biomarker for assessing GAD severity.</p>
</sec>
<sec id="s4_3">
<label>4.3</label>
<title>Limitations</title>
<p>Although this study offers new evidence supporting a novel approach to quantifying the severity of GAD, several limitations should be acknowledged. First, the sample size was relatively small, and the sex distribution was uneven. Therefore, the generalizability of the proposed EEG-based severity estimation framework across sexes should be interpreted cautiously. Future studies should aim to increase the number of participants and ensure balanced sample sizes across groups. Second, the HAM-A was used as the reference standard for quantifying GAD severity. However, because the HAM-A is a clinician-rated and ordinal measurement instrument, it is subject to measurement variability, which may partly manifest as label noise in the model training data. Accordingly, the model&#x2019;s prediction error reflects not only the potential gap between EEG-derived neurophysiological features and symptom severity, but also the intrinsic uncertainty of the clinical reference standard itself. Conceptually, our model learned a mapping between EEG features and the contemporaneous clinical consensus on severity. Future work should integrate multimodal assessments and longitudinal outcomes to establish a more robust composite standard, thereby further improving the validity and clinical applicability of objective severity evaluation. Third, the cross-sectional design limits the ability to evaluate the model&#x2019;s performance in longitudinal monitoring or in predicting treatment responses. Prospective studies with repeated EEG measurements over time are necessary to determine the model&#x2019;s utility in tracking disease progression. Finally, the current model was trained using data from a single clinical center, which may introduce regional bias and restrict its generalizability. Future research should prioritize multicenter validation to ensure the model&#x2019;s robustness across diverse populations and healthcare settings.</p>
</sec>
</sec>
<sec id="s5" sec-type="conclusions">
<label>5</label>
<title>Conclusion</title>
<p>In conclusion, this study introduced a quantitative approach to assessing the severity of GAD and successfully applied the Conv_gMLP model for its objective evaluation, thereby further validating the feasibility of using algorithmic models for severity quantification. The Conv_gMLP model achieved the lowest MAE (0.32 &#xb1; 0.07) in predicting GAD severity within a 10-second time window, significantly outperforming traditional ML models as well as other DL models. Additionally, the FC analysis conducted in this study provided evidence of altered brain network interactions in GAD patients, particularly showing enhanced connectivity between the frontal and temporal lobes. Regarding rhythmic features, beta rhythm features were found to carry noticeably higher weights across brain regions. Based on these findings, the continued development and application of algorithmic models such as Conv_gMLP hold great promise for enabling more accurate and objective diagnosis and assessment of GAD severity, thereby supporting precision diagnosis, individualized treatment, and the advancement of clinical care for GAD.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="data-availability">
<title>Data availability statement</title>
<p>The datasets generated and analyzed during the current study are not publicly available due to participant privacy and ethical restrictions, but are available from the corresponding authors on reasonable request.</p></sec>
<sec id="s7" sec-type="ethics-statement">
<title>Ethics statement</title>
<p>The studies involving humans were approved by Ethics Committee of Zhejiang Normal University. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p></sec>
<sec id="s8" sec-type="author-contributions">
<title>Author contributions</title>
<p>XL: Project administration, Writing &#x2013; original draft. YC: Writing &#x2013; review &amp; editing, Data curation. ZY: Writing &#x2013; review &amp; editing, Software. WL: Methodology, Writing &#x2013; review &amp; editing, Software. BZ: Validation, Writing &#x2013; review &amp; editing, Data curation. GL: Writing &#x2013; review &amp; editing, Project administration. SL: Writing &#x2013; review &amp; editing, Validation.</p></sec>
<ack>
<title>Acknowledgments</title>
<p>We would like to express our sincere gratitude to the Jinhua Science and Technology Bureau for their funding, and also thank all the participants in this article.</p>
</ack>
<sec id="s10" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p></sec>
<sec id="s11" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec id="s12" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Nejati</surname> <given-names>V</given-names></name>
<name><surname>Rad</surname> <given-names>JA</given-names></name>
<name><surname>Rasanan</surname> <given-names>AHH</given-names></name>
</person-group>. 
<article-title>Neuromodulation of risk and reward processing during decision making in individuals with general anxiety disorder (GAD)</article-title>. <source>Sci Rep</source>. (<year>2025</year>) <volume>15</volume>:<fpage>371</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-024-84520-y</pub-id>, PMID: <pub-id pub-id-type="pmid">39747372</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<label>2</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<collab>World Health Organization</collab>
</person-group>. 
<article-title>International classification of diseases 11th revision (ICD-11)</article-title>. <source>World Health Organ</source>. (<year>2018</year>).
</mixed-citation>
</ref>
<ref id="B3">
<label>3</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Papola</surname> <given-names>D</given-names></name>
<name><surname>Miguel</surname> <given-names>C</given-names></name>
<name><surname>Mazzaglia</surname> <given-names>M</given-names></name>
<name><surname>Franco</surname> <given-names>P</given-names></name>
<name><surname>Tedeschi</surname> <given-names>F</given-names></name>
<name><surname>Romero</surname> <given-names>SA</given-names></name>
<etal/>
</person-group>. 
<article-title>Psychotherapies for generalized anxiety disorder in adults: a systematic review and network meta-analysis of randomized clinical trials</article-title>. <source>JAMA Psychiatry</source>. (<year>2024</year>) <volume>81</volume>:<page-range>250&#x2013;9</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1001/jamapsychiatry.2023.3971</pub-id>, PMID: <pub-id pub-id-type="pmid">37851421</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<label>4</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Matsuyama</surname> <given-names>S</given-names></name>
<name><surname>Otsubo</surname> <given-names>T</given-names></name>
<name><surname>Nomoto</surname> <given-names>K</given-names></name>
<name><surname>Higa</surname> <given-names>S</given-names></name>
<name><surname>Takashio</surname> <given-names>O</given-names></name>
</person-group>. 
<article-title>Prevalence of generalized anxiety disorder in Japan: a general population survey</article-title>. <source>Neuropsychiatr Dis Treat</source>. (<year>2024</year>) <volume>20</volume>:<page-range>1355&#x2013;66</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.2147/NDT.S456272</pub-id>, PMID: <pub-id pub-id-type="pmid">38947368</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<label>5</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Dickson</surname> <given-names>SJ</given-names></name>
<name><surname>Oar</surname> <given-names>EL</given-names></name>
<name><surname>Kangas</surname> <given-names>M</given-names></name>
<name><surname>Johnco</surname> <given-names>CJ</given-names></name>
<name><surname>Lavell</surname> <given-names>CH</given-names></name>
<name><surname>Seaton</surname> <given-names>AH</given-names></name>
<etal/>
</person-group>. 
<article-title>A systematic review and meta-analysis of impairment and quality of life in children and adolescents with anxiety disorders</article-title>. <source>Clin Child Family Psychol Rev</source>. (<year>2024</year>) <volume>27</volume>:<page-range>342&#x2013;56</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s10567-024-00484-5</pub-id>, PMID: <pub-id pub-id-type="pmid">38782783</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<label>6</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ibrahim</surname> <given-names>D</given-names></name>
<name><surname>Ahmed</surname> <given-names>RM</given-names></name>
<name><surname>Mohammad</surname> <given-names>AZ</given-names></name>
<name><surname>Ibrahim</surname> <given-names>B</given-names></name>
<name><surname>Mohammed</surname> <given-names>T</given-names></name>
<name><surname>Mohamed</surname> <given-names>ME</given-names></name>
<etal/>
</person-group>. 
<article-title>Prevalence and correlates of generalized anxiety disorder and perceived stress among Sudanese medical students</article-title>. <source>BMC Psychiatry</source>. (<year>2024</year>) <volume>24</volume>:<fpage>68</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s12888-024-05510-y</pub-id>, PMID: <pub-id pub-id-type="pmid">38263070</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<label>7</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wilmer</surname> <given-names>MT</given-names></name>
<name><surname>Anderson</surname> <given-names>K</given-names></name>
<name><surname>Reynolds</surname> <given-names>M</given-names></name>
</person-group>. 
<article-title>Correlates of quality of life in anxiety disorders: review of recent research</article-title>. <source>Curr Psychiatry Rep</source>. (<year>2021</year>) <volume>23</volume>:<fpage>77</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11920-021-01290-4</pub-id>, PMID: <pub-id pub-id-type="pmid">34613508</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<label>8</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Stein</surname> <given-names>DJ</given-names></name>
<name><surname>Craske</surname> <given-names>MG</given-names></name>
<name><surname>Rothbaum</surname> <given-names>BO</given-names></name>
<name><surname>Chamberlain</surname> <given-names>SR</given-names></name>
<name><surname>Fineberg</surname> <given-names>NA</given-names></name>
<name><surname>Choi</surname> <given-names>KW</given-names></name>
<etal/>
</person-group>. 
<article-title>The clinical characterization of the adult patient with an anxiety or related disorder aimed at personalization of management</article-title>. <source>World Psychiatry</source>. (<year>2021</year>) <volume>20</volume>:<page-range>336&#x2013;56</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/wps.20919</pub-id>, PMID: <pub-id pub-id-type="pmid">34505377</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<label>9</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Bystritsky</surname> <given-names>A</given-names></name>
<name><surname>Khalsa</surname> <given-names>SS</given-names></name>
<name><surname>Cameron</surname> <given-names>ME</given-names></name>
<name><surname>Schiffman</surname> <given-names>J</given-names></name>
</person-group>. 
<article-title>Current diagnosis and treatment of anxiety disorders</article-title>. <source>Pharm Ther</source>. (<year>2013</year>) <volume>38</volume>:<fpage>30</fpage>.
</mixed-citation>
</ref>
<ref id="B10">
<label>10</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zorowitz</surname> <given-names>S</given-names></name>
<name><surname>Bennett</surname> <given-names>D</given-names></name>
<name><surname>Choe</surname> <given-names>G</given-names></name>
<name><surname>Niv</surname> <given-names>Y</given-names></name>
</person-group>. 
<article-title>A recurring reproduction error in the administration of the Generalized Anxiety Disorder scale</article-title>. <source>Lancet Psychiatry</source>. (<year>2021</year>) <volume>8</volume>:<page-range>180&#x2013;1</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/S2215-0366(21)00001-8</pub-id>, PMID: <pub-id pub-id-type="pmid">33610220</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<label>11</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Slee</surname> <given-names>A</given-names></name>
<name><surname>Nazareth</surname> <given-names>I</given-names></name>
<name><surname>Bondaronek</surname> <given-names>P</given-names></name>
<name><surname>Liu</surname> <given-names>Y</given-names></name>
<name><surname>Cheng</surname> <given-names>Z</given-names></name>
<name><surname>Freemantle</surname> <given-names>N</given-names></name>
</person-group>. 
<article-title>Pharmacological treatments for generalised anxiety disorder: a systematic review and network meta-analysis</article-title>. <source>Lancet</source>. (<year>2019</year>) <volume>393</volume>:<page-range>768&#x2013;77</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/S0140-6736(18)31793-8</pub-id>, PMID: <pub-id pub-id-type="pmid">30712879</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<label>12</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Hamilton</surname> <given-names>M</given-names></name>
</person-group>. 
<article-title>The assessment of anxiety states by rating</article-title>. <source>Br J Med Psychol</source>. (<year>1959</year>) <volume>32</volume>:<page-range>50&#x2013;5</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/j.2044-8341.1959.tb00467.x</pub-id>, PMID: <pub-id pub-id-type="pmid">13638508</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<label>13</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Thompson</surname> <given-names>E</given-names></name>
</person-group>. 
<article-title>Hamilton rating scale for anxiety (HAM-A)</article-title>. <source>Occup Med</source>. (<year>2015</year>) <volume>65</volume>:<fpage>601</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1093/occmed/kqv054</pub-id>, PMID: <pub-id pub-id-type="pmid">26370845</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<label>14</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Liu</surname> <given-names>W</given-names></name>
<name><surname>Zhou</surname> <given-names>B</given-names></name>
<name><surname>Li</surname> <given-names>G</given-names></name>
<name><surname>Luo</surname> <given-names>X</given-names></name>
</person-group>. 
<article-title>Enhanced diagnostics for generalized anxiety disorder: leveraging differential channel and functional connectivity features based on frontal EEG signals</article-title>. <source>Sci Rep</source>. (<year>2024</year>) <volume>14</volume>:<fpage>22789</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-024-73615-1</pub-id>, PMID: <pub-id pub-id-type="pmid">39354007</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<label>15</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>De la Pe&#xf1;a-Arteaga</surname> <given-names>V</given-names></name>
<name><surname>Fern&#xe1;ndez-Rodr&#xed;guez</surname> <given-names>M</given-names></name>
<name><surname>Moreira</surname> <given-names>PS</given-names></name>
<name><surname>Abreu</surname> <given-names>T</given-names></name>
<name><surname>Portugal-Nunes</surname> <given-names>C</given-names></name>
<name><surname>Soriano-Mas</surname> <given-names>C</given-names></name>
<etal/>
</person-group>. 
<article-title>An fMRI study of cognitive regulation of reward processing in generalized anxiety disorder (GAD)</article-title>. <source>Psychiatry Research: Neuroimaging</source>. (<year>2022</year>) <volume>324</volume>:<fpage>111493</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.pscychresns.2022.111493</pub-id>, PMID: <pub-id pub-id-type="pmid">35635931</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<label>16</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Gavrilescu</surname> <given-names>M</given-names></name>
<name><surname>Vizireanu</surname> <given-names>N</given-names></name>
</person-group>. 
<article-title>Predicting depression, anxiety, and stress levels from videos using the facial action coding system</article-title>. <source>Sensors</source>. (<year>2019</year>) <volume>19</volume>:<fpage>3693</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/s19173693</pub-id>, PMID: <pub-id pub-id-type="pmid">31450687</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<label>17</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Tomasi</surname> <given-names>J</given-names></name>
<name><surname>Zai</surname> <given-names>CC</given-names></name>
<name><surname>Pouget</surname> <given-names>JG</given-names></name>
<name><surname>Tiwari</surname> <given-names>AK</given-names></name>
<name><surname>Kennedy</surname> <given-names>JL</given-names></name>
</person-group>. 
<article-title>Heart rate variability: Evaluating a potential biomarker of anxiety disorders</article-title>. <source>Psychophysiology</source>. (<year>2024</year>) <volume>61</volume>:<elocation-id>e14481</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/psyp.14481</pub-id>, PMID: <pub-id pub-id-type="pmid">37990619</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<label>18</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>de Abreu Costa</surname> <given-names>M</given-names></name>
<name><surname>Goncalves</surname> <given-names>FG</given-names></name>
<name><surname>Ferreira-Garcia</surname> <given-names>R</given-names></name>
<name><surname>de Moraes</surname> <given-names>F</given-names></name>
<name><surname>de Nonohay</surname> <given-names>RG</given-names></name>
<name><surname>Manfro</surname> <given-names>GG</given-names></name>
</person-group>. 
<article-title>Heart rate variability as a predictor of improvement in emotional interference in Generalized Anxiety Disorder</article-title>. <source>J Psychiatr Res</source>. (<year>2021</year>) <volume>140</volume>:<page-range>22&#x2013;9</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.jpsychires.2021.05.059</pub-id>, PMID: <pub-id pub-id-type="pmid">34087752</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<label>19</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ancillon</surname> <given-names>L</given-names></name>
<name><surname>Elgendi</surname> <given-names>M</given-names></name>
<name><surname>Menon</surname> <given-names>C</given-names></name>
</person-group>. 
<article-title>Machine learning for anxiety detection using biosignals: a review</article-title>. <source>Diagnostics</source>. (<year>2022</year>) <volume>12</volume>:<fpage>1794</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/diagnostics12081794</pub-id>, PMID: <pub-id pub-id-type="pmid">35892505</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<label>20</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Hao</surname> <given-names>X</given-names></name>
<name><surname>Ma</surname> <given-names>M</given-names></name>
<name><surname>Meng</surname> <given-names>F</given-names></name>
<name><surname>Liang</surname> <given-names>H</given-names></name>
<name><surname>Liang</surname> <given-names>C</given-names></name>
<name><surname>Liu</surname> <given-names>X</given-names></name>
<etal/>
</person-group>. 
<article-title>Diminished attention network activity and heightened salience-default mode transitions in generalized anxiety disorder: Evidence from resting-state EEG microstate analysis</article-title>. <source>J Affect Disord</source>. (<year>2025</year>) <volume>373</volume>:<page-range>227&#x2013;36</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.jad.2024.12.095</pub-id>, PMID: <pub-id pub-id-type="pmid">39743145</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<label>21</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Al-Ezzi</surname> <given-names>A</given-names></name>
<name><surname>Kamel</surname> <given-names>N</given-names></name>
<name><surname>Faye</surname> <given-names>I</given-names></name>
<name><surname>Gunaseli</surname> <given-names>E</given-names></name>
</person-group>. 
<article-title>Analysis of default mode network in social anxiety disorder: EEG resting-state effective connectivity study</article-title>. <source>Sensors</source>. (<year>2021</year>) <volume>21</volume>:<fpage>4098</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/s21124098</pub-id>, PMID: <pub-id pub-id-type="pmid">34203578</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<label>22</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ayd&#x131;n</surname> <given-names>S</given-names></name>
</person-group>. 
<article-title>Alzheimer&#x2019;s disease is characterized by lower segregation in resting-state eyes-closed EEG</article-title>. <source>J Med Biol Eng</source>. (<year>2024</year>) <volume>44</volume>:<fpage>894</fpage>&#x2013;<lpage>902</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s40846-024-00917-0</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<label>23</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ayd&#x131;n</surname> <given-names>S</given-names></name>
</person-group>. 
<article-title>Cross-validated adaboost classification of emotion regulation strategies identified by spectral coherence in resting-state</article-title>. <source>Neuroinformatics</source>. (<year>2022</year>) <volume>20</volume>:<page-range>627&#x2013;39</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s12021-021-09542-7</pub-id>, PMID: <pub-id pub-id-type="pmid">34536200</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<label>24</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Luo</surname> <given-names>X</given-names></name>
<name><surname>Zhou</surname> <given-names>B</given-names></name>
<name><surname>Fang</surname> <given-names>J</given-names></name>
<name><surname>Cherif-Riahi</surname> <given-names>Y</given-names></name>
<name><surname>Li</surname> <given-names>G</given-names></name>
<name><surname>Shen</surname> <given-names>X</given-names></name>
</person-group>. 
<article-title>Integrating EEG and ensemble learning for accurate grading and quantification of generalized anxiety disorder: A novel diagnostic approach</article-title>. <source>Diagnostics</source>. (<year>2024</year>) <volume>14</volume>:<fpage>1122</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/diagnostics14111122</pub-id>, PMID: <pub-id pub-id-type="pmid">38893648</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<label>25</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wang</surname> <given-names>D</given-names></name>
<name><surname>Wen</surname> <given-names>W</given-names></name>
<name><surname>Zhang</surname> <given-names>X</given-names></name>
<name><surname>Wu</surname> <given-names>H</given-names></name>
<name><surname>Lei</surname> <given-names>C</given-names></name>
<name><surname>Chao</surname> <given-names>J</given-names></name>
<etal/>
</person-group>. 
<article-title>Analysis of altered brain dynamics during episodic recall and detection of generalized anxiety disorder</article-title>. <source>Neuroscience</source>. (<year>2023</year>) <volume>524</volume>:<fpage>37</fpage>&#x2013;<lpage>51</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.neuroscience.2023.01.021</pub-id>, PMID: <pub-id pub-id-type="pmid">36707018</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<label>26</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Shen</surname> <given-names>Z</given-names></name>
<name><surname>Li</surname> <given-names>G</given-names></name>
<name><surname>Fang</surname> <given-names>J</given-names></name>
<name><surname>Zhong</surname> <given-names>H</given-names></name>
<name><surname>Wang</surname> <given-names>J</given-names></name>
<name><surname>Sun</surname> <given-names>Y</given-names></name>
<etal/>
</person-group>. 
<article-title>Aberrated multidimensional EEG characteristics in patients with generalized anxiety disorder: a machine-learning based analysis framework</article-title>. <source>Sensors</source>. (<year>2022</year>) <volume>22</volume>:<fpage>5420</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/s22145420</pub-id>, PMID: <pub-id pub-id-type="pmid">35891100</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<label>27</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Al-Ezzi</surname> <given-names>A</given-names></name>
<name><surname>Kamel</surname> <given-names>N</given-names></name>
<name><surname>Al-Shargabi</surname> <given-names>AA</given-names></name>
<name><surname>Al-Shargie</surname> <given-names>F</given-names></name>
<name><surname>Al-Shargabi</surname> <given-names>A</given-names></name>
<name><surname>Yahya</surname> <given-names>N</given-names></name>
<etal/>
</person-group>. 
<article-title>Machine learning for the detection of social anxiety disorder using effective connectivity and graph theory measures</article-title>. <source>Front Psychiatry</source>. (<year>2023</year>) <volume>14</volume>:<elocation-id>1155812</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpsyt.2023.1155812</pub-id>, PMID: <pub-id pub-id-type="pmid">37255678</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<label>28</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ke</surname> <given-names>G</given-names></name>
<name><surname>Meng</surname> <given-names>Q</given-names></name>
<name><surname>Finley</surname> <given-names>T</given-names></name>
<name><surname>Wang</surname> <given-names>T</given-names></name>
<name><surname>Chen</surname> <given-names>W</given-names></name>
<name><surname>Ma</surname> <given-names>W</given-names></name>
<etal/>
</person-group>. 
<article-title>Lightgbm: A highly efficient gradient boosting decision tree</article-title>. <source>Adv Neural Inf Process Syst</source>. (<year>2017</year>) <volume>30</volume>.
</mixed-citation>
</ref>
<ref id="B29">
<label>29</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Sagi</surname> <given-names>O</given-names></name>
<name><surname>Rokach</surname> <given-names>L</given-names></name>
</person-group>. 
<article-title>Approximating XGBoost with an interpretable decision tree</article-title>. <source>Inf Sci</source>. (<year>2021</year>) <volume>572</volume>:<page-range>522&#x2013;42</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ins.2021.05.055</pub-id>
</mixed-citation>
</ref>
<ref id="B30">
<label>30</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Prokhorenkova</surname> <given-names>L</given-names></name>
<name><surname>Gusev</surname> <given-names>G</given-names></name>
<name><surname>Vorobev</surname> <given-names>A</given-names></name>
<name><surname>Dorogush</surname> <given-names>AV</given-names></name>
<name><surname>Gulin</surname> <given-names>A</given-names></name>
</person-group>. 
<article-title>CatBoost: unbiased boosting with categorical features</article-title>. <source>Adv Neural Inf Process Syst</source>. (<year>2018</year>) <volume>31</volume>.
</mixed-citation>
</ref>
<ref id="B31">
<label>31</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Mou</surname> <given-names>S</given-names></name>
<name><surname>Yan</surname> <given-names>S</given-names></name>
<name><surname>Shen</surname> <given-names>S</given-names></name>
<name><surname>Shuai</surname> <given-names>Y</given-names></name>
<name><surname>Li</surname> <given-names>G</given-names></name>
<name><surname>Shen</surname> <given-names>Z</given-names></name>
<etal/>
</person-group>. 
<article-title>Prolonged disease course leads to impaired brain function in anxiety disorder: a resting state EEG study</article-title>. <source>Neuropsychiatr Dis Treat</source>. (<year>2024</year>) <volume>20</volume>:<page-range>1409&#x2013;19</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.2147/NDT.S458106</pub-id>, PMID: <pub-id pub-id-type="pmid">39049937</pub-id>
</mixed-citation>
</ref>
<ref id="B32">
<label>32</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Park</surname> <given-names>SM</given-names></name>
<name><surname>Jeong</surname> <given-names>B</given-names></name>
<name><surname>Oh</surname> <given-names>DY</given-names></name>
<name><surname>Choi</surname> <given-names>C-H</given-names></name>
<name><surname>Jung</surname> <given-names>HY</given-names></name>
<name><surname>Lee</surname> <given-names>J-Y</given-names></name>
<etal/>
</person-group>. 
<article-title>Identification of major psychiatric disorders from resting-state electroencephalography using a machine learning approach</article-title>. <source>Front Psychiatry</source>. (<year>2021</year>) <volume>12</volume>:<elocation-id>707581</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpsyt.2021.707581</pub-id>, PMID: <pub-id pub-id-type="pmid">34483999</pub-id>
</mixed-citation>
</ref>
<ref id="B33">
<label>33</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Al-Ezzi</surname> <given-names>A</given-names></name>
<name><surname>Yahya</surname> <given-names>N</given-names></name>
<name><surname>Kamel</surname> <given-names>N</given-names></name>
<name><surname>Faye</surname> <given-names>I</given-names></name>
<name><surname>Alsaih</surname> <given-names>K</given-names></name>
<name><surname>Gunaseli</surname> <given-names>E</given-names></name>
</person-group>. 
<article-title>Severity assessment of social anxiety disorder using deep learning models on brain effective connectivity</article-title>. <source>IEEE Access</source>. (<year>2021</year>) <volume>9</volume>:<page-range>86899&#x2013;913</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/ACCESS.2021.3089358</pub-id>
</mixed-citation>
</ref>
<ref id="B34">
<label>34</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>LeCun</surname> <given-names>Y</given-names></name>
<name><surname>Bengio</surname> <given-names>Y</given-names></name>
<name><surname>Hinton</surname> <given-names>G</given-names></name>
</person-group>. 
<article-title>Deep learning</article-title>. <source>Nature</source>. (<year>2015</year>) <volume>521</volume>:<page-range>436&#x2013;44</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/nature14539</pub-id>, PMID: <pub-id pub-id-type="pmid">26017442</pub-id>
</mixed-citation>
</ref>
<ref id="B35">
<label>35</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Hu</surname> <given-names>F</given-names></name>
<name><surname>He</surname> <given-names>K</given-names></name>
<name><surname>Wang</surname> <given-names>C</given-names></name>
<name><surname>Zheng</surname> <given-names>Q</given-names></name>
<name><surname>Zhou</surname> <given-names>B</given-names></name>
<name><surname>Li</surname> <given-names>G</given-names></name>
<etal/>
</person-group>. 
<article-title>STRFLNet: spatio-temporal representation fusion learning network for EEG-based emotion recognition</article-title>. <source>IEEE Trans Affect Computing</source>. (<year>2025</year>), <fpage>1</fpage>&#x2013;<lpage>16</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/TAFFC.2025.3611173</pub-id>
</mixed-citation>
</ref>
<ref id="B36">
<label>36</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Liu</surname> <given-names>W</given-names></name>
<name><surname>Li</surname> <given-names>G</given-names></name>
<name><surname>Huang</surname> <given-names>Z</given-names></name>
<name><surname>Jiang</surname> <given-names>W</given-names></name>
<name><surname>Luo</surname> <given-names>X</given-names></name>
<name><surname>Xu</surname> <given-names>X</given-names></name>
</person-group>. 
<article-title>Enhancing generalized anxiety disorder diagnosis precision: MSTCNN model utilizing high-frequency EEG signals</article-title>. <source>Front Psychiatry</source>. (<year>2023</year>) <volume>14</volume>:<elocation-id>1310323</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpsyt.2023.1310323</pub-id>, PMID: <pub-id pub-id-type="pmid">38179243</pub-id>
</mixed-citation>
</ref>
<ref id="B37">
<label>37</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Khaleghi</surname> <given-names>N</given-names></name>
<name><surname>Hashemi</surname> <given-names>S</given-names></name>
<name><surname>Peivandi</surname> <given-names>M</given-names></name>
<name><surname>Ardabili</surname> <given-names>SZ</given-names></name>
<name><surname>Behjati</surname> <given-names>M</given-names></name>
<name><surname>Sheykhivand</surname> <given-names>S</given-names></name>
<etal/>
</person-group>. 
<article-title>EEG-based functional connectivity analysis of brain abnormalities: A systematic review study</article-title>. <source>Inform Med Unlocked</source>. (<year>2024</year>) <volume>47</volume>:<fpage>101476</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.imu.2024.101476</pub-id>
</mixed-citation>
</ref>
<ref id="B38">
<label>38</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Dominicus</surname> <given-names>L</given-names></name>
<name><surname>Lodema</surname> <given-names>D</given-names></name>
<name><surname>Oranje</surname> <given-names>B</given-names></name>
<name><surname>Zandstra</surname> <given-names>M</given-names></name>
<name><surname>Hermans</surname> <given-names>A</given-names></name>
<name><surname>Imhof</surname> <given-names>L</given-names></name>
<etal/>
</person-group>. 
<article-title>Reliability and state-dependency of EEG connectivity, complexity and network characteristics</article-title>. <source>Sci Rep</source>. (<year>2025</year>) <volume>15</volume>:<fpage>38454</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-025-23662-z</pub-id>, PMID: <pub-id pub-id-type="pmid">41188471</pub-id>
</mixed-citation>
</ref>
<ref id="B39">
<label>39</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Stam</surname> <given-names>CJ</given-names></name>
<name><surname>Nolte</surname> <given-names>G</given-names></name>
<name><surname>Daffertshofer</surname> <given-names>A</given-names></name>
</person-group>. 
<article-title>Phase lag index: assessment of functional connectivity from multi channel EEG and MEG with diminished bias from common sources</article-title>. <source>Hum Brain Mapp</source>. (<year>2007</year>) <volume>28</volume>:<page-range>1178&#x2013;93</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/hbm.20346</pub-id>, PMID: <pub-id pub-id-type="pmid">17266107</pub-id>
</mixed-citation>
</ref>
<ref id="B40">
<label>40</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Liu</surname> <given-names>H</given-names></name>
<name><surname>Dai</surname> <given-names>Z</given-names></name>
<name><surname>So</surname> <given-names>D</given-names></name>
<name><surname>Le</surname> <given-names>QV</given-names></name>
</person-group>. 
<article-title>Pay attention to MLPs</article-title>. <source>Adv Neural Inf Process Syst</source>. (<year>2021</year>) <volume>34</volume>:<page-range>9204&#x2013;15</page-range>.
</mixed-citation>
</ref>
<ref id="B41">
<label>41</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Christou</surname> <given-names>V</given-names></name>
<name><surname>Miltiadous</surname> <given-names>A</given-names></name>
<name><surname>Tsoulos</surname> <given-names>I</given-names></name>
<name><surname>Karvounis</surname> <given-names>E</given-names></name>
<name><surname>Tzimourta</surname> <given-names>KD</given-names></name>
<name><surname>Tsipouras</surname> <given-names>MG</given-names></name>
<etal/>
</person-group>. 
<article-title>Evaluating the window size&#x2019;s role in automatic EEG epilepsy detection</article-title>. <source>Sensors</source>. (<year>2022</year>) <volume>22</volume>:<fpage>9233</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/s22239233</pub-id>, PMID: <pub-id pub-id-type="pmid">36501935</pub-id>
</mixed-citation>
</ref>
<ref id="B42">
<label>42</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Li</surname> <given-names>JW</given-names></name>
<name><surname>Barma</surname> <given-names>S</given-names></name>
<name><surname>Mak</surname> <given-names>PU</given-names></name>
<name><surname>Chen</surname> <given-names>F</given-names></name>
<name><surname>Li</surname> <given-names>C</given-names></name>
<name><surname>Li</surname> <given-names>MT</given-names></name>
<etal/>
</person-group>. 
<article-title>Single-channel selection for EEG-based emotion recognition using brain rhythm sequencing</article-title>. <source>IEEE J Biomed Health Inform</source>. (<year>2022</year>) <volume>26</volume>:<page-range>2493&#x2013;503</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/JBHI.2022.3148109</pub-id>, PMID: <pub-id pub-id-type="pmid">35120013</pub-id>
</mixed-citation>
</ref>
<ref id="B43">
<label>43</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wang</surname> <given-names>H</given-names></name>
<name><surname>Mou</surname> <given-names>S</given-names></name>
<name><surname>Pei</surname> <given-names>X</given-names></name>
<name><surname>Zhang</surname> <given-names>X</given-names></name>
<name><surname>Shen</surname> <given-names>S</given-names></name>
<name><surname>Zhang</surname> <given-names>J</given-names></name>
<etal/>
</person-group>. 
<article-title>The power spectrum and functional connectivity characteristics of resting-state EEG in patients with generalized anxiety disorder</article-title>. <source>Sci Rep</source>. (<year>2025</year>) <volume>15</volume>:<fpage>5991</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-025-90362-z</pub-id>, PMID: <pub-id pub-id-type="pmid">39966577</pub-id>
</mixed-citation>
</ref>
<ref id="B44">
<label>44</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Al-Ezzi</surname> <given-names>A</given-names></name>
<name><surname>Kamel</surname> <given-names>N</given-names></name>
<name><surname>Faye</surname> <given-names>I</given-names></name>
<name><surname>Gunaseli</surname> <given-names>E</given-names></name>
</person-group>. 
<article-title>Review of EEG, ERP, and brain connectivity estimators as predictive biomarkers of social anxiety disorder</article-title>. <source>Front Psychol</source>. (<year>2020</year>) <volume>11</volume>:<elocation-id>730</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpsyg.2020.00730</pub-id>, PMID: <pub-id pub-id-type="pmid">32508695</pub-id>
</mixed-citation>
</ref>
<ref id="B45">
<label>45</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhuang</surname> <given-names>N</given-names></name>
<name><surname>Zeng</surname> <given-names>Y</given-names></name>
<name><surname>Tong</surname> <given-names>L</given-names></name>
<name><surname>Zhang</surname> <given-names>C</given-names></name>
<name><surname>Zhang</surname> <given-names>H</given-names></name>
<name><surname>Yan</surname> <given-names>B</given-names></name>
</person-group>. 
<article-title>Emotion recognition from EEG signals using multidimensional information in EMD domain</article-title>. <source>BioMed Res Int</source>. (<year>2017</year>) <volume>2017</volume>:<fpage>8317357</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1155/2017/8317357</pub-id>, PMID: <pub-id pub-id-type="pmid">28900626</pub-id>
</mixed-citation>
</ref>
<ref id="B46">
<label>46</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Langhammer</surname> <given-names>T</given-names></name>
<name><surname>Hilbert</surname> <given-names>K</given-names></name>
<name><surname>Adolph</surname> <given-names>D</given-names></name>
<name><surname>Arolt</surname> <given-names>V</given-names></name>
<name><surname>Bischoff</surname> <given-names>S</given-names></name>
<name><surname>B&#xf6;hnlein</surname> <given-names>J</given-names></name>
<etal/>
</person-group>. 
<article-title>Resting-state functional connectivity in anxiety disorders: a multicenter fMRI study</article-title>. <source>Mol Psychiatry</source>. (<year>2025</year>) <volume>30</volume>:<page-range>1548&#x2013;57</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41380-024-02768-2</pub-id>, PMID: <pub-id pub-id-type="pmid">39367057</pub-id>
</mixed-citation>
</ref>
<ref id="B47">
<label>47</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Chu</surname> <given-names>C-S</given-names></name>
<name><surname>Lin</surname> <given-names>Y-Y</given-names></name>
<name><surname>Huang</surname> <given-names>CC-Y</given-names></name>
<name><surname>Chung</surname> <given-names>Y-A</given-names></name>
<name><surname>Park</surname> <given-names>SY</given-names></name>
<name><surname>Chang</surname> <given-names>W-C</given-names></name>
<etal/>
</person-group>. 
<article-title>Altered electroencephalography-based source functional connectivity in patients with generalized anxiety disorder</article-title>. <source>Clin Neurophysiol</source>. (<year>2025</year>) <volume>175</volume>:<fpage>2010736</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.clinph.2025.04.014</pub-id>, PMID: <pub-id pub-id-type="pmid">40318258</pub-id>
</mixed-citation>
</ref>
<ref id="B48">
<label>48</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Foo</surname> <given-names>M</given-names></name>
<name><surname>Freedle</surname> <given-names>LR</given-names></name>
</person-group>. 
<article-title>The effects of sandplay therapy on the limbic system and prefrontal cortex in women with generalized anxiety disorder</article-title>. <source>Arts Psychother</source>. (<year>2024</year>) <volume>88</volume>:<fpage>102145</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.aip.2024.102145</pub-id>
</mixed-citation>
</ref>
<ref id="B49">
<label>49</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Bludau</surname> <given-names>S</given-names></name>
<name><surname>Eickhoff</surname> <given-names>SB</given-names></name>
<name><surname>Mohlberg</surname> <given-names>H</given-names></name>
<name><surname>Caspers</surname> <given-names>S</given-names></name>
<name><surname>Laird</surname> <given-names>AR</given-names></name>
<name><surname>Fox</surname> <given-names>PT</given-names></name>
<etal/>
</person-group>. 
<article-title>Cytoarchitecture, probability maps and functions of the human frontal pole</article-title>. <source>NeuroImage</source>. (<year>2014</year>) <volume>93</volume>:<page-range>260&#x2013;75</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.neuroimage.2013.05.052</pub-id>, PMID: <pub-id pub-id-type="pmid">23702412</pub-id>
</mixed-citation>
</ref>
<ref id="B50">
<label>50</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wang</surname> <given-names>Y</given-names></name>
<name><surname>Chai</surname> <given-names>F</given-names></name>
<name><surname>Zhang</surname> <given-names>H</given-names></name>
<name><surname>Liu</surname> <given-names>X</given-names></name>
<name><surname>Xie</surname> <given-names>P</given-names></name>
<name><surname>Zheng</surname> <given-names>L</given-names></name>
<etal/>
</person-group>. 
<article-title>Cortical functional activity in patients with generalized anxiety disorder</article-title>. <source>BMC Psychiatry</source>. (<year>2016</year>) <volume>16</volume>:<fpage>217</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s12888-016-0917-3</pub-id>, PMID: <pub-id pub-id-type="pmid">27388467</pub-id>
</mixed-citation>
</ref>
<ref id="B51">
<label>51</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Al-Zamil</surname> <given-names>M</given-names></name>
<name><surname>Kulikova</surname> <given-names>NG</given-names></name>
<name><surname>Minenko</surname> <given-names>IA</given-names></name>
<name><surname>Shurygina</surname> <given-names>IP</given-names></name>
<name><surname>Petrova</surname> <given-names>MM</given-names></name>
<name><surname>Mansur</surname> <given-names>N</given-names></name>
<etal/>
</person-group>. 
<article-title>Comparative analysis of high-frequency and low-frequency transcutaneous electrical stimulation of the right median nerve in the regression of clinical and neurophysiological manifestations of generalized anxiety disorder</article-title>. <source>J Clin Med</source>. (<year>2024</year>) <volume>13</volume>:<fpage>3026</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/jcm13113026</pub-id>, PMID: <pub-id pub-id-type="pmid">38892737</pub-id>
</mixed-citation>
</ref>
<ref id="B52">
<label>52</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Fang</surname> <given-names>J</given-names></name>
<name><surname>Li</surname> <given-names>G</given-names></name>
<name><surname>Xu</surname> <given-names>W</given-names></name>
<name><surname>Liu</surname> <given-names>W</given-names></name>
<name><surname>Chen</surname> <given-names>G</given-names></name>
<name><surname>Zhu</surname> <given-names>Y</given-names></name>
<etal/>
</person-group>. 
<article-title>Exploring abnormal brain functional connectivity in healthy adults, depressive disorder, and generalized anxiety disorder through EEG signals: A machine learning approach for triple classification</article-title>. <source>Brain Sci</source>. (<year>2024</year>) <volume>14</volume>:<fpage>245</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/brainsci14030245</pub-id>, PMID: <pub-id pub-id-type="pmid">38539633</pub-id>
</mixed-citation>
</ref>
<ref id="B53">
<label>53</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>D&#xed;az</surname> <given-names>H</given-names></name>
<name><surname>Cid</surname> <given-names>FM</given-names></name>
<name><surname>Ot&#xe1;rola</surname> <given-names>J</given-names></name>
<name><surname>Rojas</surname> <given-names>R</given-names></name>
<name><surname>Alarc&#xf3;n</surname> <given-names>O</given-names></name>
<name><surname>Ca&#xf1;ete</surname> <given-names>L</given-names></name>
</person-group>. 
<article-title>EEG Beta band frequency domain evaluation for assessing stress and anxiety in resting, eyes closed, basal conditions</article-title>. <source>Procedia Comput Sci</source>. (<year>2019</year>) <volume>162</volume>:<page-range>974&#x2013;81</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.procs.2019.12.075</pub-id>
</mixed-citation>
</ref>
<ref id="B54">
<label>54</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Massullo</surname> <given-names>C</given-names></name>
<name><surname>Carbone</surname> <given-names>GA</given-names></name>
<name><surname>Farina</surname> <given-names>B</given-names></name>
<name><surname>Panno</surname> <given-names>A</given-names></name>
<name><surname>Capriotti</surname> <given-names>C</given-names></name>
<name><surname>Giacchini</surname> <given-names>M</given-names></name>
<etal/>
</person-group>. 
<article-title>Dysregulated brain salience within a triple network model in high trait anxiety individuals: A pilot EEG functional connectivity study</article-title>. <source>Int J Psychophysiol</source>. (<year>2020</year>) <volume>157</volume>:<page-range>61&#x2013;9</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ijpsycho.2020.09.002</pub-id>, PMID: <pub-id pub-id-type="pmid">32976888</pub-id>
</mixed-citation>
</ref>
<ref id="B55">
<label>55</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wang</surname> <given-names>J</given-names></name>
<name><surname>Fang</surname> <given-names>J</given-names></name>
<name><surname>Xu</surname> <given-names>Y</given-names></name>
<name><surname>Zhong</surname> <given-names>H</given-names></name>
<name><surname>Li</surname> <given-names>J</given-names></name>
<name><surname>Li</surname> <given-names>H</given-names></name>
<etal/>
</person-group>. 
<article-title>Difference analysis of multidimensional electroencephalogram characteristics between young and old patients with generalized anxiety disorder</article-title>. <source>Front Hum Neurosci</source>. (<year>2022</year>) <volume>16</volume>:<elocation-id>1074587</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fnhum.2022.1074587</pub-id>, PMID: <pub-id pub-id-type="pmid">36504623</pub-id>
</mixed-citation>
</ref>
<ref id="B56">
<label>56</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Steriade</surname> <given-names>M</given-names></name>
</person-group>. 
<article-title>Grouping of brain rhythms in corticothalamic systems</article-title>. <source>Neuroscience</source>. (<year>2006</year>) <volume>137</volume>:<page-range>1087&#x2013;106</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.neuroscience.2005.10.029</pub-id>, PMID: <pub-id pub-id-type="pmid">16343791</pub-id>
</mixed-citation>
</ref>
<ref id="B57">
<label>57</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Yu</surname> <given-names>X</given-names></name>
<name><surname>Li</surname> <given-names>Z</given-names></name>
<name><surname>Zang</surname> <given-names>Z</given-names></name>
<name><surname>Liu</surname> <given-names>Y</given-names></name>
</person-group>. 
<article-title>Real-time EEG-based emotion recognition</article-title>. <source>Sensors</source>. (<year>2023</year>) <volume>23</volume>:<fpage>7853</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/s23187853</pub-id>, PMID: <pub-id pub-id-type="pmid">37765910</pub-id>
</mixed-citation>
</ref>
<ref id="B58">
<label>58</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Guo</surname> <given-names>X</given-names></name>
<name><surname>Yang</surname> <given-names>F</given-names></name>
<name><surname>Fan</surname> <given-names>L</given-names></name>
<name><surname>Gu</surname> <given-names>Y</given-names></name>
<name><surname>Ma</surname> <given-names>J</given-names></name>
<name><surname>Zhang</surname> <given-names>J</given-names></name>
<etal/>
</person-group>. 
<article-title>Disruption of functional and structural networks in first-episode, drug-na&#xef;ve adolescents with generalized anxiety disorder</article-title>. <source>J Affect Disord</source>. (<year>2021</year>) <volume>284</volume>:<page-range>229&#x2013;37</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.jad.2021.01.088</pub-id>, PMID: <pub-id pub-id-type="pmid">33618206</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1131242">Eleni Stroulia</ext-link>, University of Alberta, Canada</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/604648">Richard Jones</ext-link>, University of Otago, Christchurch, New Zealand</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3259904">Linze Qian</ext-link>, Zhejiang University, China</p></fn>
</fn-group>
</back>
</article>