<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neurosci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neurosci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1662-453X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnins.2026.1752176</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>High accuracy EEG signal classification for brain computer interfaces using advanced neural architectures</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Lin</surname>
<given-names>Daicheng</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhang</surname>
<given-names>Qi</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Chen</surname>
<given-names>Huan</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Lu</surname>
<given-names>Yanjie</given-names>
</name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Chen</surname>
<given-names>Haiting</given-names>
</name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Li</surname>
<given-names>Lianfeng</given-names>
</name>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Mayet</surname>
<given-names>Abdulilah Mohammad</given-names>
</name>
<xref ref-type="aff" rid="aff7"><sup>7</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2620633"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhang</surname>
<given-names>Guodao</given-names>
</name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="aff" rid="aff8"><sup>8</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2904183"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Miao</surname>
<given-names>Xinjun</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Qiu</surname>
<given-names>Xianke</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Department of Emergency, Wenzhou Central Hospital</institution>, <city>Wenzhou</city>, <state>Zhejiang</state>, <country country="cn">China</country></aff>
<aff id="aff2"><label>2</label><institution>Wenzhou Central Hospital, Affiliated to Wenzhou Medical University</institution>, <state>Zhejiang</state>, <country country="cn">China</country></aff>
<aff id="aff3"><label>3</label><institution>Institute of Intelligent Media Computing, Hangzhou Dianzi University</institution>, <city>Hangzhou</city>, <country country="cn">China</country></aff>
<aff id="aff4"><label>4</label><institution>Shangyu Institute of Science and Engineering Co. Ltd., Hangzhou Dianzi University</institution>, <city>Shaoxing</city>, <country country="cn">China</country></aff>
<aff id="aff5"><label>5</label><institution>School of Data Science and Artificial Intelligence, Wenzhou University of Technology</institution>, <city>Wenzhou</city>, <country country="cn">China</country></aff>
<aff id="aff6"><label>6</label><institution>Network Information Center, The Maternal and Child Health Hospital of Guangxi Zhuang Autonomous Region</institution>, <city>Nanning</city>, <country country="cn">China</country></aff>
<aff id="aff7"><label>7</label><institution>Electrical Engineering Department, King Khalid University</institution>, <city>Abha</city>, <country country="sa">Saudi Arabia</country></aff>
<aff id="aff8"><label>8</label><institution>Fujian Key Laboratory of Big Data Application and Intellectualization for Tea Industry, Wuyi University</institution>, <city>Wuyishan</city>, <country country="cn">China</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Haiting Chen, <email xlink:href="mailto:20210304@wzut.edu.cn">20210304@wzut.edu.cn</email>; Lianfeng Li, <email xlink:href="mailto:lilianfeng6@163.com">lilianfeng6@163.com</email>; Xinjun Miao, <email xlink:href="mailto:miaoxinjun0909@163.com">miaoxinjun0909@163.com</email>; Xianke Qiu, <email xlink:href="mailto:qupids@163.com">qupids@163.com</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-18">
<day>18</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>20</volume>
<elocation-id>1752176</elocation-id>
<history>
<date date-type="received">
<day>28</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>30</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>30</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Lin, Zhang, Chen, Lu, Chen, Li, Mayet, Zhang, Miao and Qiu.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Lin, Zhang, Chen, Lu, Chen, Li, Mayet, Zhang, Miao and Qiu</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-18">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>This study proposes advanced neural network architectures for classifying specific motor-related electroencephalography (EEG) tasks, employing deep feature extraction techniques. We analyzed EEG data from the MILimbEEG dataset, consisting of recordings from 60 individuals as they performed eight distinct motor movements: baseline with eyes open, left-hand closing, right-hand closing, dorsiflexion and plantarflexion of both the left and right feet, as well as rest periods between tasks.</p>
</sec>
<sec>
<title>Methods</title>
<p>For each of the 16 electrodes used in the recordings, 10 critical features were extracted, resulting in a comprehensive set of 160 features per sample that encapsulate the intricate brain activities associated with each task. A Group Method of Data Handling (GMDH) neural network, structured with eight hidden layers and a decremental arrangement of neurons from 40 in the first to 5 in the last, was utilized to classify these tasks.</p>
</sec>
<sec>
<title>Results</title>
<p>This network configuration achieved an impressive classification accuracy of approximately 96%, demonstrating a robust capability to accurately decode EEG signals tied to specific motor actions.</p>
</sec>
<sec>
<title>Discussion</title>
<p>The high precision achieved in this study underscores the efficacy of sophisticated computational models like the GMDH network in enhancing the interpretation of EEG signals for the development of brain-computer interfaces (BCIs). This research significantly advances the potential of EEG as a reliable modality for BCIs, effectively translating brain activity into actionable commands suitable for neurorehabilitation and assistive technologies. Our findings contribute substantially to the BCI field, promising to improve clinical outcomes by enabling more precise and effective interaction with neurorehabilitation devices.</p>
</sec>
</abstract>
<kwd-group>
<kwd>brain-computer interface</kwd>
<kwd>EEG</kwd>
<kwd>feature extraction</kwd>
<kwd>GMDH neural networks</kwd>
<kwd>motor movement classification</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work is supported by Wenzhou Major Scientific and Technological Innovation Project (Grant No. ZY2024025) and Wenzhou Basic Public Welfare Scientific Research Project (Y20220194 to XQ) and the Open Project Program of Fujian Key Laboratory of Big Data Application and Intellectualization for Tea Industry, Wuyi University (FKLBDAITI202404), and Deanship of Research and Graduate Studies at King Khalid University for funding this work through Large Research Project under grant number RGP2/415/46.</funding-statement>
</funding-group>
<counts>
<fig-count count="10"/>
<table-count count="3"/>
<equation-count count="8"/>
<ref-count count="37"/>
<page-count count="12"/>
<word-count count="9128"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Brain Imaging Methods</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>Electroencephalography (EEG) is a non-invasive technique that records electrical activity of the brain via electrodes placed on the scalp. The ability to monitor and interpret these signals is critical for a wide range of applications, from clinical diagnostics and neurotherapy to advanced interfaces between humans and machines. EEG signals are inherently complex, reflecting the dynamic interplay of neural circuits involved in sensory processing, cognitive functions, and motor responses. Classifying these signals accurately opens up profound possibilities for understanding brain functions, diagnosing neurological disorders, and developing brain-computer interfaces (BCIs) that can translate thoughts into actions. As such, the classification of EEG signals using advanced computational models not only enhances our ability to interpret neural signals&#x2014;specifically identifying patterns associated with motor or cognitive activities&#x2014;but also paves the way for innovative therapeutic strategies and assistive technologies that can significantly improve quality of life for individuals with impaired cognitive or motor functions.</p>
<p>Machine Learning (ML) approaches based on Deep Neural Networks (DNN) are celebrated for significantly boosting the reliability and precision of task detection in EEG-driven systems for prosthetic limbs (<xref ref-type="bibr" rid="ref2">Al-Haddad and Jaber, 2023</xref>). Biomedical engineers are increasingly investigating Artificial Intelligence (AI) techniques that employ EEG data for diagnostic and classification tasks (<xref ref-type="bibr" rid="ref34">Tuncer and Bolat, 2022</xref>; <xref ref-type="bibr" rid="ref24">Li et al., 2023</xref>; <xref ref-type="bibr" rid="ref3">Ali et al., 2023</xref>). Promising results have been observed with deep learning algorithms focused on emotion recognition using EEG, which benefit from advanced feature extraction techniques (<xref ref-type="bibr" rid="ref23">Li et al., 2023</xref>). Additionally, the accuracy in representing and classifying EEG signals has been enhanced by applying multi-task learning algorithms (<xref ref-type="bibr" rid="ref9">Choo et al., 2023</xref>). In the area of EEG Motor Imagery (MI), Convolutional Neural Networks (CNNs) have proven effective, particularly those that utilize several layers to analyze spatial and temporal data (<xref ref-type="bibr" rid="ref4">Amin et al., 2019</xref>). Meanwhile, the complex legal requirements for medical devices have been scrutinized, highlighting the need for innovative, data-driven methods in their design and approval (<xref ref-type="bibr" rid="ref5">Arnould et al., 2021</xref>). Moreover, the development of prosthetic sockets, traditionally dependent on expert analysis, has recently started integrating machine learning methods (<xref ref-type="bibr" rid="ref13">Dickinson et al., 2021</xref>). These studies collectively highlight the crucial role of AI in tackling complex issues within various branches of biomedical engineering.</p>
<p>In prosthetic limb control systems, there has been a traditional reliance on electromyography (EMG) data (<xref ref-type="bibr" rid="ref7">Baygin et al., 2022</xref>; <xref ref-type="bibr" rid="ref35">Tuncer et al., 2020</xref>). Various approaches for analyzing hand movements and activities using surface EMG data have been established (<xref ref-type="bibr" rid="ref14">Fatimah et al., 2021</xref>; <xref ref-type="bibr" rid="ref22">Karnam et al., 2022</xref>). Initial investigations have explored gesture detection control in bionic hands through the use of surface EMG data (<xref ref-type="bibr" rid="ref32">Shi et al., 2018</xref>). While EMG-based systems are highly effective, integrating EEG represents a notable improvement. Employing EEG to monitor brain activity provides a more intuitive and potentially more accurate control system compared to EMG, which records muscle electrical activities. Recent research has shown promise for superior control systems utilizing a hybrid EEG-FNIRS Brain-Computer Interface that engages ensemble learning and nonlinear feature extraction techniques (<xref ref-type="bibr" rid="ref25">Maher et al., 2023</xref>).</p>
<p>In the realm of biomedical engineering, there is a significant shift toward more sophisticated computational techniques, particularly within the domain of prosthetic technology. Noteworthy progress has been observed in Brain-Computer Interface (BCI) systems that integrate machine learning algorithms on Field Programmable Gate Arrays (FPGA), notably in accurately determining motor intentions for prosthetic hand movements (<xref ref-type="bibr" rid="ref10">Constantine et al., 2021</xref>). Studies focusing on cost-effective, high-performance prosthetics for the upper limbs have utilized EEG data alongside cutting-edge deep learning methods, like LSTM networks that are refined with Genetic Algorithms (GA) to classify motion intentions in real time (<xref ref-type="bibr" rid="ref21">Kansal et al., 2023</xref>). Additionally, haptic feedback mechanisms that mimic the activities of mechanoreceptors to provide tactile feedback have been incorporated into EEG-based control systems for economical robotic hand prostheses, enhancing the capability to recognize objects and shapes (<xref ref-type="bibr" rid="ref11">Cutipa-Puma et al., 2023</xref>).</p>
<p>In recent years, the application of deep convolutional neural networks (CNNs) in brain-computer interface (BCI) research has led to significant advancements in the classification of EEG signals. These models, known for their ability to capture complex spatial and temporal patterns, have achieved remarkable performance in various domains. For instance, a study on EEG-based mobile robot control demonstrated the effectiveness of a CNN integrated with a robotic operating system (ROS) for enhancing real-time control. While this approach showed high accuracy in translating EEG signals into actionable commands, its computational intensity posed challenges for offline applications and resource-constrained settings (<xref ref-type="bibr" rid="ref16">Ghinoiu et al., 2024</xref>). Similarly, a hybrid deep learning framework combining CNNs with autoencoders was developed for EEG-based emotion recognition. This ensemble model exhibited robust accuracy in detecting emotions but required significant computational resources, limiting its adaptability for portable systems. The reliance on high-performance hardware emphasized the gap in implementing such models for real-time applications (<xref ref-type="bibr" rid="ref36">Yousefipour et al., 2024</xref>). Another noteworthy example is a CNN-based architecture tailored for emotion recognition in a continuous valence-arousal-dominance (VAD) space. By employing 2D convolutional layers, this model effectively captured the spatial and temporal dynamics of EEG signals. However, the computational demands of the system highlighted its limitations in being deployed in environments with limited hardware capabilities (<xref ref-type="bibr" rid="ref19">Jon et al., 2024</xref>). In the realm of motor imagery EEG classification, a novel CNN architecture using a sliding window technique achieved state-of-the-art performance in distinguishing motor tasks. 
Despite its efficacy, the high computational overhead raised concerns regarding its feasibility for real-time or offline BCI applications (<xref ref-type="bibr" rid="ref33">Singh et al., 2024</xref>). Additionally, research utilizing a ResNet-101 architecture for improved motor imagery classification highlighted the model&#x2019;s ability to extract nuanced features. Nevertheless, its deep structure led to significant resource consumption, making it challenging for integration into practical systems (<xref ref-type="bibr" rid="ref31">Sharma et al., 2024</xref>). These studies collectively underscore the potential of CNN-based approaches in advancing EEG signal processing. However, they also reveal a critical limitation: the computational complexity inherent in deep architectures, which necessitates powerful and often costly hardware. This constraint poses significant challenges for real-time, offline, or embedded systems, particularly in neurorehabilitation and assistive technologies.</p>
<p>Although the individual components of the proposed framework&#x2014;manual EEG feature extraction and GMDH-based learning&#x2014;have been explored in prior studies, the originality of this work lies in their task-specific integration and systematic design for multi-class motor task decoding under computational constraints. First, this study addresses an eight-class EEG motor task classification problem involving both upper- and lower-limb movements, which is more challenging than the binary or four-class motor imagery tasks commonly investigated in lightweight BCI literature. Second, we propose a decremental multi-layer GMDH architecture specifically tailored to EEG-derived features, enabling progressive feature abstraction while maintaining strict control over model complexity. This architectural design has not been systematically explored in prior EEG-based GMDH applications. Third, by combining physiologically interpretable time- and frequency-domain EEG features with a self-organizing polynomial network, the proposed approach bridges traditional feature engineering and adaptive learning. This integration yields a robust and computationally efficient alternative to both classical shallow classifiers and data-hungry deep learning models. Finally, the proposed framework is quantitatively validated in terms of both classification performance and computational efficiency, demonstrating that high accuracy can be achieved without relying on deep architectures. These aspects collectively distinguish the present work from existing lightweight EEG classification methods.</p>
<p><xref ref-type="fig" rid="fig1">Figure 1</xref> provides a flowchart summarizing the key steps of the proposed methodology. The process begins with the collection of EEG signals during predefined motor tasks, followed by the extraction of significant features that encapsulate the temporal and spectral characteristics of the signals. These features serve as inputs to the GMDH neural network, designed with a decremental neuron architecture in its hidden layers to optimize classification accuracy. Finally, the outputs of the network are compared with the target outputs to evaluate performance, ensuring the reliability of the classification results.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Flowchart of the proposed methodology, illustrating the sequential steps of data collection, feature extraction, GMDH neural network implementation, and output evaluation.</p>
</caption>
<graphic xlink:href="fnins-20-1752176-g001.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Four-panel infographic with a green background illustrating EEG signal processing for neural network training. Panel one shows an electrode cap map labeled by brain regions with the caption &#x201C;Collecting EEG signals.&#x201D; Panel two features a head silhouette with abstract brainwaves and the caption &#x201C;Extracting features from collected signals.&#x201D; Panel three displays a neural network diagram captioned &#x201C;Training GMDH neural network to detect body movements.&#x201D; Panel four contains a confusion matrix chart with the caption &#x201C;Comparing network output with target output.&#x201D; Blue arrows connect each step in sequence.</alt-text>
</graphic>
</fig>
</sec>
<sec id="sec2">
<label>2</label>
<title>Data acquisition</title>
<p>The EEG data analyzed in this study were obtained from the publicly available MILimbEEG dataset (<xref ref-type="bibr" rid="ref6">Asanza et al., 2023</xref>), which was selected due to its inclusion of multi-class motor execution tasks involving both upper and lower limbs, enabling a comprehensive evaluation of EEG-based brain&#x2013;computer interface (BCI) systems under realistic motor control scenarios. EEG signals were acquired from 60 healthy participants using an OpenBCI Cyton + Daisy biosensing platform. Recordings were obtained from 16 dry electrodes positioned according to the international 10&#x2013;10 electrode placement system, providing coverage over sensorimotor cortical regions relevant to motor execution. The spatial configuration of the electrodes and their correspondence to underlying cortical areas are illustrated in <xref ref-type="fig" rid="fig2">Figure 2</xref>, which highlights the distribution of channels over motor-related Brodmann areas. EEG signals were sampled at a frequency of 125&#x202F;Hz. As described in the original dataset documentation, the recorded EEG signals were preprocessed using a bandpass filter between 7 and 31&#x202F;Hz, preserving the <italic>&#x03BC;</italic> (7.5&#x2013;12.5&#x202F;Hz) and <italic>&#x03B2;</italic> (16&#x2013;31&#x202F;Hz) frequency bands that are strongly associated with motor-related cortical activity, while attenuating low-frequency drift and high-frequency noise. Signal normalization was applied to reduce inter-subject variability and to standardize signal amplitudes across recording sessions. In this study, we directly utilized the preprocessed signals provided by the dataset without introducing additional filtering stages.</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>EEG electrode configuration on the 10&#x2013;10 system. Reproduced from <xref ref-type="bibr" rid="ref6">Asanza et al. (2023)</xref>, licensed under <ext-link xlink:href="https://creativecommons.org/licenses/by/4.0/" ext-link-type="uri">CC BY 4.0</ext-link>.</p>
</caption>
<graphic xlink:href="fnins-20-1752176-g002.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Diagram of an EEG electrode cap showing numbered electrode placements over different brain regions, with color-coded circles representing the frontal lobe (blue), motor cortex (orange), temporal lobe (gray), central sulcus (green), sensorimotor cortex (yellow), parietal lobe (pink), and occipital lobe (lavender), alongside a corresponding legend explaining each color.</alt-text>
</graphic>
</fig>
<p>The experimental setup adopted during data acquisition is shown in <xref ref-type="fig" rid="fig3">Figure 3</xref>, where participants were seated in a fixed posture to ensure consistent recording conditions across sessions. Visual cues presented on a monitor guided participants through a predefined sequence of motor tasks and rest intervals, as illustrated in <xref ref-type="fig" rid="fig4">Figure 4</xref>. This structured task timeline ensured temporal consistency across trials and facilitated the segmentation of EEG signals corresponding to individual motor actions. Each participant performed eight distinct motor tasks, including baseline with eyes open, left-hand closing, right-hand closing, dorsal and plantar flexion of the left and right feet, as well as resting periods between tasks. This protocol yielded a total of 480 EEG samples (60 participants &#x00D7; 8 tasks), with balanced representation across all task categories. A representative example of the recorded EEG signals during task execution is shown in <xref ref-type="fig" rid="fig5">Figure 5</xref>, illustrating the normalized EEG waveforms used for subsequent feature extraction.</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Setup of the experimental environment for EEG recording. Reproduced from <xref ref-type="bibr" rid="ref6">Asanza et al. (2023)</xref>, licensed under <ext-link xlink:href="https://creativecommons.org/licenses/by/4.0/" ext-link-type="uri">CC BY 4.0</ext-link>.</p>
</caption>
<graphic xlink:href="fnins-20-1752176-g003.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Diagram showing a person seated in a reclined chair with arm and leg supports, wearing an OpenBCI headset, positioned 1.5 meters from a monitor. Both the arm and leg are bent at one hundred forty-five degrees.</alt-text>
</graphic>
</fig>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>Visual timeline of motor tasks and rest intervals. Reproduced from <xref ref-type="bibr" rid="ref6">Asanza et al. (2023)</xref>, licensed under <ext-link xlink:href="https://creativecommons.org/licenses/by/4.0/" ext-link-type="uri">CC BY 4.0</ext-link>.</p>
</caption>
<graphic xlink:href="fnins-20-1752176-g004.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Sequential diagram showing alternating visual stimuli and rest periods. Each black screen panel contains labeled conditions: BEO, PLF, CRH, DRF, CLH, each with distinct white symbols or shapes, separated by rest screens with a white cross, all displayed for four seconds per stage.</alt-text>
</graphic>
</fig>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption>
<p>Sample EEG data trace during motor task execution.</p>
</caption>
<graphic xlink:href="fnins-20-1752176-g005.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Line graph showing electrical activity from sixteen electrodes over a four-second period, with voltage fluctuations in millivolts on the y-axis and time in seconds on the x-axis, each electrode represented by a different colored line.</alt-text>
</graphic>
</fig>
<p>Rather than reproducing the full experimental protocol detailed in the original MILimbEEG publication, this study focuses on the study-specific utilization of the dataset, including feature extraction, classification architecture, and evaluation strategy. The MILimbEEG dataset was chosen because it enables the assessment of computationally efficient learning models on a challenging eight-class motor execution problem, which extends beyond the binary or four-class motor imagery tasks commonly considered in lightweight EEG classification studies.</p>
</sec>
<sec id="sec3">
<label>3</label>
<title>Feature extraction from EEG signals</title>
<p>Feature extraction is a pivotal process in the analysis of EEG data, particularly when these signals are used to decipher and classify brain activity related to specific tasks. Effective feature extraction transforms raw EEG data into a more manageable set of features that capture the essential characteristics of the brain signals. This transformation is crucial for enhancing the performance of machine learning models, such as neural networks, by providing them with robust and informative inputs that highlight the underlying patterns of neural activity.</p>
<p>In this study, we extract a comprehensive set of 10 features from the EEG signals recorded at each of 16 electrodes. These features encompass both time and frequency domain characteristics, providing a multidimensional representation of the EEG signals that are associated with various motor tasks. The extracted features include:</p>
<p><italic>Frequency band powers:</italic> The power spectral density of the EEG signals is calculated within specific frequency bands known to be relevant to brain activity. These bands are (<xref ref-type="bibr" rid="ref17">Han, 2025</xref>):</p>
<list list-type="bullet">
<list-item><p>Delta (0.5&#x2013;4&#x202F;Hz): Associated with sleep and deep relaxation.</p></list-item>
<list-item><p>Theta (4&#x2013;8&#x202F;Hz): Linked to meditation, memory recall, and relaxation.</p></list-item>
<list-item><p>Alpha (8&#x2013;13&#x202F;Hz): Related to states of relaxation while awake.</p></list-item>
<list-item><p>Beta (13&#x2013;30&#x202F;Hz): Generally associated with active thinking, problem-solving, and active concentration.</p></list-item>
<list-item><p>Gamma (30&#x2013;50&#x202F;Hz): Connected with high-level information processing and cognitive functioning.</p></list-item>
</list>
<p>For each band, the power is computed using the Fast Fourier Transform (FFT) of the EEG signal, and integrating the square of the FFT&#x2019;s magnitude within the band limits shown in <xref ref-type="disp-formula" rid="E1">Equation 1</xref> (<xref ref-type="bibr" rid="ref20">Kalauzi et al., 2025</xref>):</p>
<disp-formula id="E1">
<mml:math id="M1">
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi>b</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:munderover>
<mml:mo movablelimits="false">&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>f</mml:mi>
<mml:mo>=</mml:mo>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>low</mml:mi>
</mml:msub>
</mml:mrow>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mtext>high</mml:mtext>
</mml:msub>
</mml:munderover>
<mml:msup>
<mml:mrow>
<mml:mo stretchy="true">|</mml:mo>
<mml:mi>X</mml:mi>
<mml:mo stretchy="true">(</mml:mo>
<mml:mi>f</mml:mi>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo stretchy="true">|</mml:mo>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:math>
<label>(1)</label>
</disp-formula>
<p>where <italic>X</italic>(<italic>f</italic>) is the Fast Fourier Transform (FFT) of the EEG signal, and <inline-formula>
<mml:math id="M2">
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>low</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math id="M3">
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mtext>high</mml:mtext>
</mml:msub>
</mml:math>
</inline-formula> are the lower and upper frequency limits of band <italic>b</italic>.</p>
<p><italic>Dominant frequency:</italic> The dominant frequency feature pinpoints the frequency that exhibits the maximum amplitude within an EEG signal&#x2019;s power spectrum, indicative of the most prominent rhythmic activity during the measured period. This feature is crucial for identifying the primary brain state associated with various cognitive or motor tasks. To determine the dominant frequency, the EEG signal is first transformed from the time domain to the frequency domain using the FFT. The power of each frequency component is then computed, and the frequency with the highest power is identified as the dominant frequency. The process can be expressed with <xref ref-type="disp-formula" rid="E2">Equations 2</xref>, <xref ref-type="disp-formula" rid="E3">3</xref> (<xref ref-type="bibr" rid="ref30">Sharma et al., 2021</xref>):</p>
<disp-formula id="E2">
<mml:math id="M4">
<mml:mi>P</mml:mi>
<mml:mo stretchy="true">(</mml:mo>
<mml:mi>f</mml:mi>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo>=</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mo stretchy="true">|</mml:mo>
<mml:mi>X</mml:mi>
<mml:mo stretchy="true">(</mml:mo>
<mml:mi>f</mml:mi>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo stretchy="true">|</mml:mo>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:math>
<label>(2)</label>
</disp-formula>
<disp-formula id="E3">
<mml:math id="M5">
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>dom</mml:mi>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mtext>argmax</mml:mtext>
<mml:mo stretchy="true">(</mml:mo>
<mml:mi>P</mml:mi>
<mml:mo stretchy="true">(</mml:mo>
<mml:mi>f</mml:mi>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo stretchy="true">)</mml:mo>
</mml:math>
<label>(3)</label>
</disp-formula>
<p>where <italic>P</italic>(<italic>f</italic>) represents the power at frequency <italic>f</italic>, and <inline-formula>
<mml:math id="M6">
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mi>dom</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> is the dominant frequency where the maximum power occurs. This feature is essential for assessing the EEG signal&#x2019;s behavior and plays a pivotal role in classifying EEG data accurately for both diagnostic and interactive applications in brain-computer interfaces.</p>
<p><italic>Mean amplitude:</italic> The average amplitude provides a measure of the signal&#x2019;s average power level across the entire recording, calculated as <xref ref-type="disp-formula" rid="E4">Equation 4</xref>:</p>
<disp-formula id="E4">
<mml:math id="M7">
<mml:mi>&#x03BC;</mml:mi>
<mml:mo>=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>N</mml:mi>
</mml:mfrac>
<mml:munderover>
<mml:mo movablelimits="false">&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>n</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:munderover>
<mml:mo>&#x2223;</mml:mo>
<mml:mi>x</mml:mi>
<mml:mo stretchy="true">[</mml:mo>
<mml:mi>n</mml:mi>
<mml:mo stretchy="true">]</mml:mo>
<mml:mo>&#x2223;</mml:mo>
</mml:math>
<label>(4)</label>
</disp-formula>
<p>where <inline-formula>
<mml:math id="M8">
<mml:mi>x</mml:mi>
<mml:mo stretchy="true">[</mml:mo>
<mml:mi>n</mml:mi>
<mml:mo stretchy="true">]</mml:mo>
</mml:math>
</inline-formula> is the signal amplitude at sample <italic>n</italic>, and <italic>N</italic> is the total number of samples.</p>
<p><italic>Standard deviation of amplitude</italic> shown in <xref ref-type="disp-formula" rid="E5">Equation 5</xref>: This statistical measure provides an index of the variability in the signal amplitude:</p>
<disp-formula id="E5">
<mml:math id="M9">
<mml:mi>&#x03C3;</mml:mi>
<mml:mo>=</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mi>N</mml:mi>
</mml:mfrac>
<mml:munderover>
<mml:mo movablelimits="false">&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>n</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:munderover>
<mml:msup>
<mml:mrow>
<mml:mo stretchy="true">(</mml:mo>
<mml:mo>&#x2223;</mml:mo>
<mml:mi>x</mml:mi>
<mml:mo stretchy="true">[</mml:mo>
<mml:mi>n</mml:mi>
<mml:mo stretchy="true">]</mml:mo>
<mml:mo>&#x2223;</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>&#x03BC;</mml:mi>
<mml:mo stretchy="true">)</mml:mo>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:msqrt>
</mml:math>
<label>(5)</label>
</disp-formula>
<p><italic>Median amplitude</italic> shown in <xref ref-type="disp-formula" rid="E6">Equation 6</xref>: The median amplitude is a robust measure of the central tendency of the signal amplitude, less sensitive to outliers than the mean:</p>
<disp-formula id="E6">
<mml:math id="M10">
<mml:mtext>median</mml:mtext>
<mml:mo>=</mml:mo>
<mml:mtext>median</mml:mtext>
<mml:mo stretchy="true">(</mml:mo>
<mml:mi>x</mml:mi>
<mml:mo stretchy="true">[</mml:mo>
<mml:mi>n</mml:mi>
<mml:mo stretchy="true">]</mml:mo>
<mml:mo stretchy="true">)</mml:mo>
</mml:math>
<label>(6)</label>
</disp-formula>
<p><italic>Peak-to-peak amplitude</italic> shown in <xref ref-type="disp-formula" rid="E7">Equation 7</xref>: This feature measures the difference between the maximum and minimum amplitudes in the EEG signal, providing an estimate of the signal&#x2019;s dynamic range (<xref ref-type="bibr" rid="ref26">Mak et al., 2012</xref>):</p>
<disp-formula id="E7">
<mml:math id="M11">
<mml:mi mathvariant="italic">ptp</mml:mi>
<mml:mo>=</mml:mo>
<mml:mo>max</mml:mo>
<mml:mo stretchy="true">(</mml:mo>
<mml:mi>x</mml:mi>
<mml:mo stretchy="true">[</mml:mo>
<mml:mi>n</mml:mi>
<mml:mo stretchy="true">]</mml:mo>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:mo>min</mml:mo>
<mml:mo stretchy="true">(</mml:mo>
<mml:mi>x</mml:mi>
<mml:mo stretchy="true">[</mml:mo>
<mml:mi>n</mml:mi>
<mml:mo stretchy="true">]</mml:mo>
<mml:mo stretchy="true">)</mml:mo>
</mml:math>
<label>(7)</label>
</disp-formula>
<p>These features were extracted for each of the 16 electrodes and for each sample, resulting in a feature matrix with dimensions corresponding to the number of samples times the product of 10 features and 16 electrodes. This comprehensive feature set serves as the input to the GMDH neural network, facilitating the precise classification of EEG signals into categories corresponding to specific motor tasks. This approach underscores the efficacy of using advanced computational models to interpret complex brain signals for applications in brain-computer interfaces and neurorehabilitation technologies.</p>
</sec>
<sec id="sec4">
<label>4</label>
<title>GMDH neural network</title>
<p>The architecture of a GMDH network tailored for EEG signal classification involves a highly adaptive and hierarchical approach, designed to effectively model and predict complex relationships among input features that represent EEG data. The input layer of the GMDH network consists of 160 inputs. These inputs represent extracted features from EEG signals, which might include spectral powers in various frequency bands, statistical summaries of the signals (mean, ptp, etc.), and other relevant features that capture the dynamics and characteristics of the EEG during different motor tasks. Each feature provides crucial information that aids in distinguishing between the types of motor tasks. The GMDH network uses an auto-selective layer formation process where neurons in each layer are formed based on the polynomial regression models of the inputs or the outputs from the previous layers. Each neuron represents a polynomial equation (as shown in <xref ref-type="disp-formula" rid="E8">Equation 8</xref>), which might start as a simple quadratic model and can become progressively more complex in higher layers if required (<xref ref-type="bibr" rid="ref18">Ivakhnenko, 1971</xref>):</p>
<disp-formula id="E8">
<mml:math id="M12">
<mml:mi>y</mml:mi>
<mml:mo>=</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mn>0</mml:mn>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mn>3</mml:mn>
</mml:msub>
<mml:msubsup>
<mml:mi>x</mml:mi>
<mml:mn>1</mml:mn>
<mml:mn>2</mml:mn>
</mml:msubsup>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mn>4</mml:mn>
</mml:msub>
<mml:msubsup>
<mml:mi>x</mml:mi>
<mml:mn>2</mml:mn>
<mml:mn>2</mml:mn>
</mml:msubsup>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mn>5</mml:mn>
</mml:msub>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:mo>&#x2026;</mml:mo>
</mml:math>
<label>(8)</label>
</disp-formula>
<p>Here, <inline-formula>
<mml:math id="M13">
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
</mml:math>
</inline-formula> are inputs from the previous layer or the initial data layer, <inline-formula>
<mml:math id="M14">
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mn>0</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>b</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
</mml:math>
</inline-formula> are the coefficients learned during training, and y is the output used as an input for the next layer.</p>
<p>The architecture of the GMDH network implemented for EEG signal classification in this study is meticulously designed to optimize the process of identifying and categorizing motor-related brain activities. The network consists of a single input layer, eight hidden layers, and a concluding output layer. The input layer is tasked with handling 160 features extracted from EEG data, which encapsulate the essential characteristics necessary for initial data analysis. These features feed into a series of eight hidden layers, which are strategically structured to refine the data processing through successive stages of complexity and abstraction. The neuron count in these hidden layers is carefully configured to gradually reduce the dimensionality of the data, starting with 40 neurons in the first hidden layer and decreasing through subsequent layers to 38, 27, 22, 16, 10, 8, and finally 5 neurons in the last hidden layer. This decremental structuring aids in distilling the most salient features and interactions relevant to the motor tasks. The output layer culminates in a single neuron, which classifies the EEG data into one of the eight predefined motor task categories. This layered and progressively refining architecture ensures a balanced approach to learning, capturing deep and complex patterns in the EEG signals while avoiding overfitting, thereby enhancing the network&#x2019;s ability to make accurate and robust predictions. Each class corresponds to a specific motor task and is numerically represented as follows:</p>
<p>(1) Baseline with Eyes Open (BEO), (2) Closing Left Hand (CLH), (3) Closing Right Hand (CRH), (4) Dorsal Flexion of Left Foot (DLF), (5) Plantar Flexion of Left Foot (PLF), (6) Dorsal Flexion of Right Foot (DRF), (7) Plantar Flexion of Right Foot (PRF), and (8) Resting between tasks (Rest).</p>
<p>The dataset comprised recordings from 60 participants, each performing 8 distinct motor tasks, resulting in a total of 480 samples. These samples were divided into training and testing sets, with 70% of the data randomly selected for training and the remaining 30% used for testing. The random allocation of samples ensured a balanced and representative distribution across both subsets, reducing potential biases and enhancing the robustness of the classification model. This approach facilitated a comprehensive evaluation of the model&#x2019;s performance on previously unseen data, ensuring the reliability of the results.</p>
<p>These classifications are based on the patterns recognized in the EEG signals, allowing for the effective categorization of brain activity corresponding to each task. In MATLAB, the entire network, including its iterative development of layers and neuron connections, is coded. The training involves adjusting the polynomial coefficients to minimize classification errors on a training dataset, with validation datasets used to prevent overfitting and to optimize the network structure. The adaptability and depth of the GMDH approach make it exceptionally suitable for EEG data, which often contains subtle and complex patterns associated with different brain activities. The end-to-end training and classification implemented in MATLAB ensure that the model is both robust and accurate, capable of handling the intricacies of EEG signal classification effectively.</p>
<p>The reported results correspond to a subject-dependent evaluation protocol, where EEG samples from all participants are randomly divided into training and testing sets. As a result, data from the same subjects may appear in both subsets. This evaluation strategy assesses the model&#x2019;s ability to discriminate motor tasks under consistent subject-specific signal characteristics. While this setup is commonly used in EEG classification studies to evaluate algorithmic performance, extending the evaluation to a subject-independent protocol (e.g., leave-one-subject-out validation) would provide deeper insight into cross-subject generalizability and is considered an important direction for future work.</p>
</sec>
<sec sec-type="results" id="sec5">
<label>5</label>
<title>Results and discussion</title>
<p>This study has demonstrated the effective application of a GMDH neural network in classifying EEG signals into eight distinct motor task categories with an impressive overall accuracy of 96.5% on test data. This performance highlights the network&#x2019;s capability to handle complex pattern recognition tasks in neurophysiological data, a cornerstone in the development of advanced BCIs.</p>
<p>The accuracy achieved by the neural network is indicative of the robustness and suitability of the feature extraction techniques employed in this study. These techniques, which included analysis of spectral components, time-domain features, and statistical characteristics of EEG signals, provided a comprehensive dataset from which the network could learn. The high level of accuracy achieved suggests that the features were well-chosen, capturing essential information needed to distinguish between the motor tasks effectively. The network was carefully architected with one input layer, eight hidden layers of descending neuron counts, and one output layer. This structure allowed the network to gradually refine its understanding of the data, layer by layer, improving its predictive accuracy while avoiding the pitfalls of overfitting. This approach demonstrates the strength of using a methodical layer reduction strategy in deep learning models where the complexity of the model is balanced against its performance.</p>
<p>To ensure the robustness and reliability of the proposed classification approach, a 5-fold cross-validation was employed. In this method, the dataset is divided into five equally sized subsets (folds). During each iteration, four folds are used for training the model, and the remaining fold is used for testing. This process is repeated five times, ensuring that each fold is used once as the testing set. The average performance metrics from all iterations provide a comprehensive assessment of the model&#x2019;s generalizability. <xref ref-type="fig" rid="fig6">Figure 6</xref> illustrates the results of the 5-fold cross-validation, highlighting the classification accuracy achieved in each fold. The accuracy across folds ranged from 90 to 96%, with a mean accuracy of approximately 95%. This consistent performance across folds confirms the stability of the proposed methodology and its ability to generalize effectively across unseen data splits.</p>
<fig position="float" id="fig6">
<label>Figure 6</label>
<caption>
<p>Results of 5-fold cross-validation, showing classification accuracy for each fold.</p>
</caption>
<graphic xlink:href="fnins-20-1752176-g006.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar chart illustrating 5-fold cross-validation accuracy, with fold numbers on the x-axis and accuracy percentages on the y-axis, showing accuracies between 94 percent and 97 percent across the five folds.</alt-text>
</graphic>
</fig>
<p>The experimental evaluation follows a two-stage protocol to ensure a fair and transparent performance assessment. First, the dataset is randomly divided into 70% training data and 30% testing data, where the test set remains completely unseen during model training and hyperparameter selection. The reported 96.5% classification accuracy corresponds exclusively to this held-out test set. Second, 5-fold cross-validation is applied independently on the full dataset to evaluate the stability and robustness of the proposed method across different data partitions. The cross-validation results are reported as complementary performance indicators and are not mixed with the test-set evaluation.</p>
<p>The confusion matrices for both the training and testing datasets provide a clear visual representation of the model&#x2019;s performance across different classes (<xref ref-type="fig" rid="fig7">Figure 7</xref>). For training data, high accuracy across all tasks suggests that the model has effectively learned the training patterns. However, the real test of the model&#x2019;s utility is reflected in the test data, where the model must apply what it has learned to previously unseen data. Here, the model also performed exceptionally well, particularly in differentiating between more distinct tasks such as Baseline with Eyes Open (BEO) and various motor flexions. Notably, some confusion did occur between tasks that are similar in nature, as reflected in the slight mixing of classes such as Dorsal and Plantar flexions. These areas of confusion offer valuable insights into where the model might be improved through further training or enhanced feature extraction, perhaps by incorporating additional or more nuanced features that can better differentiate between similar motor activities. The confusion matrices are computed for the trained GMDH network that achieved the 96.5% test accuracy, based on the fixed train&#x2013;test split. These matrices are generated separately for the training and testing phases and provide a detailed class-wise performance analysis. In particular, they illustrate the distribution of correctly classified motor tasks along the diagonal entries, as well as misclassification patterns between physiologically similar classes, such as dorsal and plantar flexion movements. This analysis enables a deeper interpretation of class-wise robustness and complements the overall accuracy and cross-validation results by revealing task-specific classification behavior.</p>
<fig position="float" id="fig7">
<label>Figure 7</label>
<caption>
<p>Confusion matrices for training and testing data.</p>
</caption>
<graphic xlink:href="fnins-20-1752176-g007.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Side-by-side confusion matrices for test and train data, each showing target classes on the horizontal axis and output classes on the vertical axis, with color-coded cells highlighting correct predictions in green and incorrect ones in pink, and cell values displaying counts and percentages for model classification performance.</alt-text>
</graphic>
</fig>
<p>The confusion matrix used in this study is based on a dataset comprising 480 samples, with each of the 60 subjects contributing 8 samples corresponding to distinct motor tasks. These samples were balanced across classes to ensure an unbiased evaluation of the model&#x2019;s performance. To improve classification accuracy, features were extracted across multiple frequencies within the <italic>&#x03BC;</italic> (7.5&#x2013;12.5&#x202F;Hz) and <italic>&#x03B2;</italic> (16&#x2013;31&#x202F;Hz) bands for each subject.</p>
<p>The computational efficiency of the proposed GMDH-based classifier was quantitatively analyzed in terms of parameter count, arithmetic operations, and inference time. The implemented architecture consists of eight hidden layers with 40&#x2013;38&#x2013;27&#x2013;22&#x2013;16&#x2013;10&#x2013;8&#x2013;5 quadratic polynomial neurons, resulting in a total of 166 neurons. Each neuron employs a second-order polynomial with two inputs and six coefficients, yielding exactly 996 trainable parameters. During inference, each polynomial neuron requires 8 multiplications and 5 additions, leading to a total of 1,328 multiplications and 830 additions, i.e., 2,158 arithmetic operations per sample for a complete forward pass through the network. Inference time was measured using a MATLAB R2023a implementation executed in single-thread CPU mode on an Intel Core i7-12700H processor. The reported inference time corresponds to the average over 1,000 forward passes with batch size equal to one. Under these conditions, the proposed model achieved an inference time of 3.21&#x202F;ms per sample. This quantitative analysis demonstrates that the proposed method combines high classification accuracy with a compact model size and fast inference, making it well suited for real-time EEG decoding and deployment on FPGA or other resource-constrained embedded platforms, where memory footprint and computational efficiency are critical.</p>
<p>The depth and decremental structure of the proposed GMDH network were selected to balance representational capacity and computational efficiency. Increasing network depth enables hierarchical modeling of nonlinear interactions among EEG-derived features, while progressively reducing the number of neurons across layers limits redundancy and mitigates overfitting. This architectural strategy is particularly suitable for EEG datasets with limited sample sizes, where overly complex models often fail to generalize. Rather than introducing a novel GMDH variant, this study focuses on a task-driven architectural configuration that leverages the self-organizing properties of GMDH to achieve high classification accuracy under strict computational constraints.</p>
<p>To provide a comprehensive assessment of the proposed method, performance evaluation was conducted using both global metrics and per-class metrics, addressing the limitations of relying solely on overall accuracy in multiclass EEG classification tasks. <xref ref-type="table" rid="tab1">Table 1</xref> summarizes the global performance metrics for both the training and testing datasets, including overall accuracy, Cohen&#x2019;s kappa coefficient, and macro- and weighted-averaged precision, recall, and F1-score. On the test dataset, the proposed GMDH-based framework achieved an overall accuracy of 96.53% and a Cohen&#x2019;s kappa value of 0.9602, indicating an almost perfect agreement beyond chance. The macro-averaged F1-score of 0.9650 demonstrates balanced classification performance across all classes, while the weighted F1-score of 0.9659 confirms robustness when accounting for class distribution. To further analyze class-specific behavior, <xref ref-type="table" rid="tab2">Table 2</xref> reports the per-class precision, recall, and F1-score computed on the test dataset for each motor task. This fine-grained evaluation reveals that most classes achieve perfect or near-perfect precision and recall, highlighting the strong discriminative capability of the proposed method. Slight performance degradation is observed primarily for anatomically and physiologically similar movements, such as dorsal and plantar flexion of the foot, which are known to produce overlapping EEG activation patterns. Despite this inherent challenge, the corresponding F1-scores remain high, indicating reliable and balanced classification performance.</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Classification performance metrics for training and testing datasets.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Dataset</th>
<th align="center" valign="top">Accuracy (%)</th>
<th align="center" valign="top">Cohen&#x2019;s kappa</th>
<th align="center" valign="top">Macro precision</th>
<th align="center" valign="top">Macro recall</th>
<th align="center" valign="top">Macro F1</th>
<th align="center" valign="top">Weighted F1</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Training</td>
<td align="char" valign="top" char=".">95.83</td>
<td align="char" valign="top" char=".">0.9523</td>
<td align="char" valign="top" char=".">0.9642</td>
<td align="char" valign="top" char=".">0.9574</td>
<td align="char" valign="top" char=".">0.9590</td>
<td align="char" valign="top" char=".">0.9589</td>
</tr>
<tr>
<td align="left" valign="top">Testing</td>
<td align="char" valign="top" char=".">96.53</td>
<td align="char" valign="top" char=".">0.9602</td>
<td align="char" valign="top" char=".">0.9660</td>
<td align="char" valign="top" char=".">0.9658</td>
<td align="char" valign="top" char=".">0.9650</td>
<td align="char" valign="top" char=".">0.9659</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Per-class classification performance on the test dataset.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Class</th>
<th align="left" valign="top">Motor task</th>
<th align="center" valign="top">Support</th>
<th align="center" valign="top">Precision</th>
<th align="center" valign="top">Recall</th>
<th align="center" valign="top">F1-score</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">1</td>
<td align="left" valign="top">Baseline with Eyes Open (BEO)</td>
<td align="center" valign="top">21</td>
<td align="char" valign="top" char=".">1.000</td>
<td align="char" valign="top" char=".">1.000</td>
<td align="char" valign="top" char=".">1.000</td>
</tr>
<tr>
<td align="left" valign="top">2</td>
<td align="left" valign="top">Closing Left Hand (CLH)</td>
<td align="center" valign="top">14</td>
<td align="char" valign="top" char=".">1.000</td>
<td align="char" valign="top" char=".">1.000</td>
<td align="char" valign="top" char=".">1.000</td>
</tr>
<tr>
<td align="left" valign="top">3</td>
<td align="left" valign="top">Closing Right Hand (CRH)</td>
<td align="center" valign="top">18</td>
<td align="char" valign="top" char=".">1.000</td>
<td align="char" valign="top" char=".">0.944</td>
<td align="char" valign="top" char=".">0.971</td>
</tr>
<tr>
<td align="left" valign="top">4</td>
<td align="left" valign="top">Dorsal Flexion of Left Foot (DLF)</td>
<td align="center" valign="top">18</td>
<td align="char" valign="top" char=".">0.895</td>
<td align="char" valign="top" char=".">0.944</td>
<td align="char" valign="top" char=".">0.919</td>
</tr>
<tr>
<td align="left" valign="top">5</td>
<td align="left" valign="top">Plantar Flexion of Left Foot (PLF)</td>
<td align="center" valign="top">16</td>
<td align="char" valign="top" char=".">0.833</td>
<td align="char" valign="top" char=".">0.938</td>
<td align="char" valign="top" char=".">0.882</td>
</tr>
<tr>
<td align="left" valign="top">6</td>
<td align="left" valign="top">Dorsal Flexion of Right Foot (DRF)</td>
<td align="center" valign="top">16</td>
<td align="char" valign="top" char=".">1.000</td>
<td align="char" valign="top" char=".">1.000</td>
<td align="char" valign="top" char=".">1.000</td>
</tr>
<tr>
<td align="left" valign="top">7</td>
<td align="left" valign="top">Plantar Flexion of Right Foot (PRF)</td>
<td align="center" valign="top">21</td>
<td align="char" valign="top" char=".">1.000</td>
<td align="char" valign="top" char=".">1.000</td>
<td align="char" valign="top" char=".">1.000</td>
</tr>
<tr>
<td align="left" valign="top">8</td>
<td align="left" valign="top">Rest</td>
<td align="center" valign="top">20</td>
<td align="char" valign="top" char=".">1.000</td>
<td align="char" valign="top" char=".">0.900</td>
<td align="char" valign="top" char=".">0.947</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>To rigorously justify the selection of 8 hidden layers in the proposed decremental GMDH architecture, we conducted an extended ablation study evaluating network depths from 1 to 12 layers (<xref ref-type="fig" rid="fig8">Figure 8</xref>). As illustrated, classification accuracy on the test set progressively increases with depth up to 8 layers, reaching a peak of 96.5%. This improvement reflects the benefits of hierarchical feature abstraction enabled by additional layers, allowing the self-organizing polynomial neurons to capture increasingly complex nonlinear relationships in the EEG-derived features. However, further increasing the depth beyond 8 layers results in a sharp decline in test accuracy (e.g., dropping to 94.2% at 9 layers, 89.1% at 10 layers, 83.7% at 11 layers, and 78.4% at 12 layers). This severe performance degradation is a clear indicator of overfitting, which is expected given the limited dataset size (only 480 samples). Deeper networks introduce unnecessary model complexity (higher parameter counts and inference latency) without corresponding generalization gains, as the GMDH&#x2019;s self-organizing mechanism begins to fit noise rather than meaningful patterns when data is scarce. The decremental neuron arrangement (40&#x202F;&#x2192;&#x202F;5 neurons) already mitigates redundancy, but excessive depth exacerbates overfitting risks on small datasets. Therefore, 8 layers were selected as the optimal configuration, achieving the highest accuracy.</p>
<fig position="float" id="fig8">
<label>Figure 8</label>
<caption>
<p>Impact of the number of hidden layers on classification accuracy in the GMDH network.</p>
</caption>
<graphic xlink:href="fnins-20-1752176-g008.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar chart showing classification accuracy on the test set versus number of hidden layers, with accuracy increasing from 20 percent at one layer to a peak of 96.5 percent at eight layers, highlighted with a yellow circle, then decreasing to 78.4 percent at twelve layers. Blue, red, and orange bars represent rising, peak, and declining accuracy respectively, and values are marked above each bar.</alt-text>
</graphic>
</fig>
<p>To further examine the rationality of the hand-crafted feature design, an ablation analysis was conducted to investigate the contribution of different feature subsets to the overall classification performance. The extracted EEG features were grouped into two main categories: frequency-domain features, including band power and dominant frequency, and time-domain statistical features, including mean, standard deviation, median, and peak-to-peak amplitude. As illustrated in <xref ref-type="fig" rid="fig9">Figure 9</xref>, frequency-domain features alone achieve substantially higher accuracy than time-domain features, reflecting the strong relevance of <italic>&#x03BC;</italic> and <italic>&#x03B2;</italic> rhythms in motor-related EEG activity. However, the highest performance is obtained when both feature groups are combined, indicating that time-domain statistics provide complementary information that further enhances classification robustness.</p>
<fig position="float" id="fig9">
<label>Figure 9</label>
<caption>
<p>Ablation study illustrating the contribution of different EEG feature subsets to motor-task classification performance.</p>
</caption>
<graphic xlink:href="fnins-20-1752176-g009.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar chart comparing classification accuracy for three feature sets: frequency-domain only at 93.6 percent, time-domain only at 86.4 percent, and full feature set at 96.5 percent.</alt-text>
</graphic>
</fig>
<p><xref ref-type="table" rid="tab3">Table 3</xref> provides a quantitative comparison between the proposed method and representative EEG classification approaches. Unlike qualitative descriptions of computational load, this comparison reports the number of trainable parameters and inference time per sample, offering a more objective assessment of computational efficiency. While most existing methods rely on deep or hybrid architectures with automatic feature extraction and large parameter counts, the proposed GMDH-based framework achieves superior classification accuracy with two to three orders of magnitude fewer parameters and substantially lower inference latency. Although manual feature extraction introduces an additional preprocessing step, it is performed offline and does not affect real-time inference, making the overall framework suitable for deployment in resource-constrained and embedded BCI systems.</p>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption>
<p>Comparison of EEG classification methods based on accuracy, computational load, and feature extraction approach.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Study</th>
<th align="left" valign="top">Method</th>
<th align="center" valign="top">Accuracy (%)</th>
<th align="center" valign="top">Number of parameters</th>
<th align="center" valign="top">Inference time (ms/sample)</th>
<th align="left" valign="top">Feature extraction</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">(<xref ref-type="bibr" rid="ref28">Sarma and Thamilarasu, 2024</xref>)</td>
<td align="left" valign="top">Deep CNN</td>
<td align="char" valign="top" char=".">91.2</td>
<td align="center" valign="top">&#x2248;1.2&#x202F;&#x00D7;&#x202F;10<sup>6</sup></td>
<td align="center" valign="top">&#x2248;25&#x2013;30</td>
<td align="left" valign="top">Automatic</td>
</tr>
<tr>
<td align="left" valign="top">(<xref ref-type="bibr" rid="ref15">Flores et al., 2024</xref>)</td>
<td align="left" valign="top">CNN&#x202F;+&#x202F;transfer learning</td>
<td align="char" valign="top" char=".">89.5</td>
<td align="center" valign="top">&#x2248;8.5&#x202F;&#x00D7;&#x202F;10<sup>5</sup></td>
<td align="center" valign="top">&#x2248;20&#x2013;25</td>
<td align="left" valign="top">Automatic</td>
</tr>
<tr>
<td align="left" valign="top">(<xref ref-type="bibr" rid="ref1">Akuthota et al., 2024</xref>)</td>
<td align="left" valign="top">Compact CNN</td>
<td align="char" valign="top" char=".">90.8</td>
<td align="center" valign="top">&#x2248;1.1&#x202F;&#x00D7;&#x202F;10<sup>5</sup></td>
<td align="center" valign="top">&#x2248;8&#x2013;10</td>
<td align="left" valign="top">Automatic</td>
</tr>
<tr>
<td align="left" valign="top">(<xref ref-type="bibr" rid="ref27">Panthadas and Bhuiyan, 2024</xref>)</td>
<td align="left" valign="top">Hybrid CNN&#x2013;SVM</td>
<td align="char" valign="top" char=".">88.7</td>
<td align="center" valign="top">&#x2248;9.0&#x202F;&#x00D7;&#x202F;10<sup>5</sup></td>
<td align="center" valign="top">&#x2248;18&#x2013;22</td>
<td align="left" valign="top">Automatic</td>
</tr>
<tr>
<td align="left" valign="top">(<xref ref-type="bibr" rid="ref12">Devi et al., 2024</xref>)</td>
<td align="left" valign="top">Multimodal DNN</td>
<td align="char" valign="top" char=".">93.5</td>
<td align="center" valign="top">&#x2248;2.3&#x202F;&#x00D7;&#x202F;10<sup>6</sup></td>
<td align="center" valign="top">&#x2248;30&#x2013;40</td>
<td align="left" valign="top">Automatic</td>
</tr>
<tr>
<td align="left" valign="top">(<xref ref-type="bibr" rid="ref37">Ziegelman and Hernandez, 2024</xref>)</td>
<td align="left" valign="top">Neural ODE</td>
<td align="char" valign="top" char=".">92.1</td>
<td align="center" valign="top">&#x2248;6.5&#x202F;&#x00D7;&#x202F;10<sup>5</sup></td>
<td align="center" valign="top">&#x2248;15&#x2013;20</td>
<td align="left" valign="top">Automatic</td>
</tr>
<tr>
<td align="left" valign="top">This work</td>
<td align="left" valign="top">GMDH neural network</td>
<td align="char" valign="top" char=".">96.5</td>
<td align="center" valign="top">996</td>
<td align="center" valign="top">3.21</td>
<td align="left" valign="top">Manual</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>To further assess the efficiency of the proposed approach when using handcrafted features, a comparative evaluation with several standard lightweight classifiers was performed. As shown in <xref ref-type="fig" rid="fig10">Figure 10</xref>, the proposed multi-layer GMDH network is compared with Support Vector Machine (SVM), Linear Discriminant Analysis (LDA), k-Nearest Neighbors (k-NN), and a lightweight Multi-Layer Perceptron (MLP). All models were trained and evaluated using the same handcrafted feature set as input and under identical experimental conditions, ensuring a fair and unbiased comparison. The results indicate that the proposed GMDH model achieves the highest classification accuracy of 96.5%, outperforming the conventional classifiers.</p>
<fig position="float" id="fig10">
<label>Figure 10</label>
<caption>
<p>Classification accuracy comparison between the proposed GMDH model and standard lightweight classifiers.</p>
</caption>
<graphic xlink:href="fnins-20-1752176-g010.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Bar chart comparing classification accuracy for five models: Proposed GMDH achieves 96.5 percent, SVM 91.5 percent, LDA 88.2 percent, k-NN 89.6 percent, and MLP 93.1 percent. Chart demonstrates Proposed GMDH as the most accurate model.</alt-text>
</graphic>
</fig>
<p>Although Group Method of Data Handling (GMDH) is not a newly introduced learning paradigm, its application to EEG-based multi-class motor task classification remains largely underexplored, particularly in comparison with contemporary deep learning architectures. The motivation for selecting GMDH in this study is rooted in both theoretical considerations and practical constraints inherent to EEG signal analysis. From a theoretical perspective, EEG signals are characterized by strong nonlinearity, high inter-channel dependency, and limited sample availability. GMDH explicitly models nonlinear feature interactions through polynomial representations, enabling it to capture second-order and higher-order dependencies between EEG-derived features. This inductive bias is well aligned with the physiological nature of EEG, where meaningful information often arises from interactions between spatial channels and spectral components rather than isolated features. In contrast to deep neural networks such as CNNs, TCNs, or deep MLPs, which rely on large-scale data to effectively constrain their hypothesis space, GMDH employs a layer-wise self-organizing mechanism that automatically controls model complexity. Neurons and layers are selected based on external validation criteria, which inherently reduces overfitting and improves generalization in small-to-medium EEG datasets. Methodologically, the novelty of this work does not lie in modifying the original GMDH learning rule, but rather in the task-specific architectural design and integration strategy. We propose a decremental multi-layer GMDH structure tailored to EEG motor task decoding, where the gradual reduction in neuron count enforces progressive feature abstraction while maintaining computational efficiency. When combined with carefully engineered time- and frequency-domain EEG features, this design yields a robust and lightweight classification framework suitable for resource-constrained BCI systems. 
Therefore, the contribution of this study is positioned as a methodological framework that bridges manual EEG feature engineering with an adaptive polynomial-based learning model, offering a practical alternative to computationally intensive deep learning approaches for EEG-based BCIs.</p>
<p>This research not only advances the scientific understanding of EEG-based signal classification using neural networks but also holds profound practical implications, particularly in the realms of medical technology and neurorehabilitation. By achieving high accuracy in classifying motor-related EEG signals, this study lays the groundwork for significant advancements in the development of BCIs, which could revolutionize the way individuals with motor impairments interact with the world. The core achievement of this study&#x2014;enabling precise interpretation of EEG signals related to specific motor tasks&#x2014;paves the way for BCIs that can translate a user&#x2019;s neural impulses into actual commands to operate assistive devices. Such technology can be life-changing for individuals suffering from paralysis, muscle weakness, or neurological disorders that impair their ability to perform everyday tasks. For example, someone with spinal cord injury could potentially use a BCI to control a robotic arm, wheelchair, or other assistive devices just by thinking about the movement they wish to execute.</p>
<p>This study addresses the critical challenge of balancing computational efficiency and classification accuracy in EEG signal processing. The proposed GMDH neural network, combined with manual feature extraction, significantly reduces computational complexity compared to deep learning approaches, enabling practical deployment on hardware platforms like FPGAs. This capability is crucial for real-time applications in resource-constrained environments, such as portable neurorehabilitation devices and assistive technologies. Extending the evaluation of the proposed framework to additional benchmark EEG datasets, such as the High Gamma Dataset (HGD) and well-established BCI Competition datasets (e.g., BCI Competition IV, Data Set 2a), is considered an important direction for future work to further assess generalizability and facilitate broader comparison with existing EEG-based motor decoding methods (<xref ref-type="bibr" rid="ref29">Schirrmeister et al., 2017</xref>; <xref ref-type="bibr" rid="ref8">Brunner et al., 2008</xref>).</p>
<p>Despite these strengths, several limitations warrant discussion. First, the reliance on manual feature extraction requires domain expertise and may limit the generalizability of the approach to datasets with varying signal characteristics. Second, while the model achieves state-of-the-art accuracy, its performance has been evaluated primarily on a specific dataset, necessitating further validation on diverse datasets to confirm its robustness. Lastly, the scalability of the proposed methodology for large-scale EEG data remains an area for future investigation.</p>
</sec>
<sec sec-type="conclusions" id="sec6">
<label>6</label>
<title>Conclusion</title>
<p>This research successfully implemented a GMDH neural network to classify EEG signals into eight distinct motor tasks, achieving an impressive accuracy of approximately 96%. The meticulously designed network architecture, featuring one input layer, eight hidden layers with progressively decreasing neuron counts, and a single output neuron, effectively managed the complex patterns inherent in EEG data. The high classification accuracy highlights the effectiveness of the feature extraction techniques employed, which captured essential characteristics of EEG signals related to various motor functions. The robustness of the model was underscored by the confusion matrices for both training and testing phases, which demonstrated high accuracy across most tasks, showing promising potential for real-world applications.</p>
<p>The practical implications of this study are significant, especially in the development of BCIs that can dramatically improve the quality of life for individuals with motor impairments. Future work will aim to refine the EEG feature set and network architecture, expand the dataset to improve the model&#x2019;s generalizability, and explore real-time applications in neurorehabilitation and consumer electronics. As research progresses, integrating these findings into commercially viable solutions will necessitate thorough testing and validation through clinical trials to ensure they meet the practical needs of users while adhering to medical safety standards. The continued exploration at the intersection of AI, machine learning, and neuroscience holds the promise of expanding the boundaries of medical science and enhancing patient care, marking a step toward a future where technology and human health care converge to create more inclusive, effective, and personalized therapies.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec7">
<title>Data availability statement</title>
<p>Publicly available datasets were analyzed in this study. This data can be found at: <ext-link xlink:href="https://data.mendeley.com/datasets/x8psbz3f6x/2" ext-link-type="uri">https://data.mendeley.com/datasets/x8psbz3f6x/2</ext-link>.</p>
</sec>
<sec sec-type="ethics-statement" id="sec8">
<title>Ethics statement</title>
<p>This study is based exclusively on the analysis of a publicly available EEG dataset (MILimbEEG). No new data were collected, and no human participants were recruited by the authors for this research. The original data collection was conducted by the creators of the MILimbEEG dataset and was approved by the relevant institutional ethics committee, with informed consent obtained from all participants, as reported in the original dataset publication. Therefore, no additional ethical approval was required for the present secondary data analysis.</p>
</sec>
<sec sec-type="author-contributions" id="sec9">
<title>Author contributions</title>
<p>DL: Resources, Writing &#x2013; original draft, Data curation, Visualization, Conceptualization, Investigation, Methodology, Software. QZ: Validation, Conceptualization, Methodology, Writing &#x2013; original draft, Visualization, Resources, Formal analysis, Software. HuC: Data curation, Methodology, Validation, Writing &#x2013; original draft, Software, Resources. YL: Writing &#x2013; original draft, Software, Resources, Methodology, Validation. HaC: Conceptualization, Visualization, Data curation, Writing &#x2013; original draft, Formal analysis. LL: Investigation, Validation, Resources, Writing &#x2013; original draft, Methodology. AM: Supervision, Formal analysis, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing, Conceptualization, Validation. GZ: Validation, Supervision, Writing &#x2013; review &#x0026; editing, Software, Writing &#x2013; original draft, Conceptualization. XM: Formal analysis, Writing &#x2013; review &#x0026; editing, Resources, Conceptualization, Validation. XQ: Writing &#x2013; review &#x0026; editing, Conceptualization, Validation, Software.</p>
</sec>
<sec sec-type="COI-statement" id="sec10">
<title>Conflict of interest</title>
<p>YL and GZ were employed by Shangyu Institute of Science and Engineering Co. Ltd.</p>
<p>The remaining author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec11">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec12">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Akuthota</surname><given-names>S.</given-names></name> <name><surname>Rajkumar</surname><given-names>K.</given-names></name> <name><surname>Janapati</surname><given-names>R.</given-names></name></person-group> (<year>2024</year>). &#x201C;<article-title>Svelte EEG net: cross-session motor imagery classification for BCI-controlled prosthetic arms using novel compact CNN</article-title>&#x201D; in <source>2024 IEEE 3rd world conference on applied intelligence and computing (AIC)</source> (<publisher-name>IEEE</publisher-name>), <fpage>1285</fpage>&#x2013;<lpage>1289</lpage>.</mixed-citation></ref>
<ref id="ref2"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Al-Haddad</surname><given-names>L. A.</given-names></name> <name><surname>Jaber</surname><given-names>A. A.</given-names></name></person-group> (<year>2023</year>). <article-title>An intelligent fault diagnosis approach for multirotor UAVs based on deep neural network of multi-resolution transform features</article-title>. <source>Drones</source> <volume>7</volume>:<fpage>82</fpage>. doi: <pub-id pub-id-type="doi">10.3390/drones7020082</pub-id></mixed-citation></ref>
<ref id="ref3"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ali</surname><given-names>O.</given-names></name> <name><surname>Saif-ur-Rehman</surname><given-names>M.</given-names></name> <name><surname>Glasmachers</surname><given-names>T.</given-names></name> <name><surname>Iossifidis</surname><given-names>I.</given-names></name> <name><surname>Klaes</surname><given-names>C.</given-names></name></person-group> (<year>2023</year>). <article-title>ConTraNet: a hybrid network for improving the classification of EEG and EMG signals with limited training data</article-title>. <source>Comput. Biol. Med.</source>:<fpage>107649</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compbiomed.2023.107649</pub-id></mixed-citation></ref>
<ref id="ref4"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Amin</surname><given-names>S. U.</given-names></name> <name><surname>Alsulaiman</surname><given-names>M.</given-names></name> <name><surname>Muhammad</surname><given-names>G.</given-names></name> <name><surname>Mekhtiche</surname><given-names>M. A.</given-names></name> <name><surname>Hossain</surname><given-names>M. S.</given-names></name></person-group> (<year>2019</year>). <article-title>Deep learning for EEG motor imagery classification based on multi-layer CNNs feature fusion</article-title>. <source>Futur. Gener. Comput. Syst.</source> <volume>101</volume>, <fpage>542</fpage>&#x2013;<lpage>554</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.future.2019.06.027</pub-id></mixed-citation></ref>
<ref id="ref5"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Arnould</surname><given-names>A.</given-names></name> <name><surname>Hendricusdottir</surname><given-names>R.</given-names></name> <name><surname>Bergmann</surname><given-names>J.</given-names></name></person-group> (<year>2021</year>). <article-title>The complexity of medical device regulations has increased, as assessed through data-driven techniques</article-title>. <source>PRO</source> <volume>3</volume>, <fpage>314</fpage>&#x2013;<lpage>330</lpage>. doi: <pub-id pub-id-type="doi">10.3390/prosthesis3040029</pub-id></mixed-citation></ref>
<ref id="ref6"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Asanza</surname><given-names>V.</given-names></name> <name><surname>Lorente-Leyva</surname><given-names>L. L.</given-names></name> <name><surname>Peluffo-Ord&#x00F3;&#x00F1;ez</surname><given-names>D. H.</given-names></name> <name><surname>Montoya</surname><given-names>D.</given-names></name> <name><surname>Gonzalez</surname><given-names>K.</given-names></name></person-group> (<year>2023</year>). <article-title>MILimbEEG: a dataset of EEG signals related to upper and lower limb execution of motor and motor imagery tasks</article-title>. <source>Data Brief</source> <volume>50</volume>:<fpage>109540</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.dib.2023.109540</pub-id>, <pub-id pub-id-type="pmid">37727590</pub-id></mixed-citation></ref>
<ref id="ref7"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Baygin</surname><given-names>M.</given-names></name> <name><surname>Barua</surname><given-names>P. D.</given-names></name> <name><surname>Dogan</surname><given-names>S.</given-names></name> <name><surname>Tuncer</surname><given-names>T.</given-names></name> <name><surname>Key</surname><given-names>S.</given-names></name> <name><surname>Acharya</surname><given-names>U. R.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>A hand-modeled feature extraction-based learning network to detect grasps using sEMG signal</article-title>. <source>Sensors</source> <volume>22</volume>:<fpage>2007</fpage>. doi: <pub-id pub-id-type="doi">10.3390/s22052007</pub-id>, <pub-id pub-id-type="pmid">35271154</pub-id></mixed-citation></ref>
<ref id="ref8"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Brunner</surname><given-names>C.</given-names></name> <name><surname>Leeb</surname><given-names>R.</given-names></name> <name><surname>M&#x00FC;ller-Putz</surname><given-names>G. R.</given-names></name> <name><surname>Schl&#x00F6;gl</surname><given-names>A.</given-names></name> <name><surname>Pfurtscheller</surname><given-names>G.</given-names></name></person-group> (<year>2008</year>). <source>BCI Competition 2008 &#x2013; Graz data set A</source>: <publisher-name>Graz University of Technology</publisher-name>.</mixed-citation></ref>
<ref id="ref9"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Choo</surname><given-names>S.</given-names></name> <name><surname>Park</surname><given-names>H.</given-names></name> <name><surname>Kim</surname><given-names>S.</given-names></name> <name><surname>Park</surname><given-names>D.</given-names></name> <name><surname>Jung</surname><given-names>J.-Y.</given-names></name> <name><surname>Lee</surname><given-names>S.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Effectiveness of multi-task deep learning framework for EEG-based emotion and context recognition</article-title>. <source>Expert Syst. Appl.</source> <volume>227</volume>:<fpage>120348</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.eswa.2023.120348</pub-id></mixed-citation></ref>
<ref id="ref10"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Constantine</surname><given-names>A.</given-names></name> <name><surname>Asanza</surname><given-names>V.</given-names></name> <name><surname>Loayza</surname><given-names>F. R.</given-names></name> <name><surname>Pel&#x00B4;</surname><given-names>E.</given-names></name> <name><surname>aez</surname><given-names>D.</given-names></name> <name><surname>Peluffo-Ordo&#x2019;nez</surname></name></person-group> (<year>2021</year>). <article-title>BCI system using a novel processing technique based on electrodes selection for hand prosthesis control (Escuela Superior Polit&#x00B4; ecnica del Litoral, ESPOL)</article-title>. <source>IFACPapersOnLine</source> <volume>54</volume>, <fpage>364</fpage>&#x2013;<lpage>369</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ifacol.2021.10.283</pub-id></mixed-citation></ref>
<ref id="ref11"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cutipa-Puma</surname><given-names>D. R.</given-names></name> <name><surname>Coaguila-Quispe</surname><given-names>C. G.</given-names></name> <name><surname>Yanyachi</surname><given-names>P. R.</given-names></name></person-group> (<year>2023</year>). <article-title>A low-cost robotic hand prosthesis with apparent haptic sense controlled by electroencephalographic signals</article-title>. <source>HardwareX</source> <volume>14</volume>:<fpage>e00439</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ohx.2023.e00439</pub-id>, <pub-id pub-id-type="pmid">37323804</pub-id></mixed-citation></ref>
<ref id="ref12"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Devi</surname><given-names>N. B.</given-names></name> <name><surname>Devi</surname><given-names>S. R.</given-names></name> <name><surname>Ramya</surname><given-names>R.</given-names></name> <name><surname>Jayasudha</surname><given-names>V.</given-names></name> <name><surname>Vinuja</surname><given-names>G.</given-names></name></person-group> (<year>2024</year>). &#x201C;<article-title>Multi-modal integration for motor imagery EEG signal classification combining with DNN and functional near-infrared spectroscopy</article-title>&#x201D; in <source>2024 international conference on intelligent algorithms for computational intelligence systems (IACIS)</source> (<publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>7</lpage>.</mixed-citation></ref>
<ref id="ref13"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dickinson</surname><given-names>A.</given-names></name> <name><surname>Diment</surname><given-names>L.</given-names></name> <name><surname>Morris</surname><given-names>R.</given-names></name> <name><surname>Pearson</surname><given-names>E.</given-names></name> <name><surname>Hannett</surname><given-names>D.</given-names></name> <name><surname>Steer</surname><given-names>J.</given-names></name></person-group> (<year>2021</year>). <article-title>Characterising residual limb morphology and prosthetic socket design based on expert clinician practice</article-title>. <source>PRO</source> <volume>3</volume>, <fpage>280</fpage>&#x2013;<lpage>299</lpage>. doi: <pub-id pub-id-type="doi">10.3390/prosthesis3040027</pub-id></mixed-citation></ref>
<ref id="ref14"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fatimah</surname><given-names>B.</given-names></name> <name><surname>Singh</surname><given-names>P.</given-names></name> <name><surname>Singhal</surname><given-names>A.</given-names></name> <name><surname>Pachori</surname><given-names>R. B.</given-names></name></person-group> (<year>2021</year>). <article-title>Hand movement recognition from sEMG signals using Fourier decomposition method</article-title>. <source>Biocybern. Biomed. Eng.</source> <volume>41</volume>, <fpage>690</fpage>&#x2013;<lpage>703</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bbe.2021.03.004</pub-id></mixed-citation></ref>
<ref id="ref15"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Flores</surname><given-names>C.</given-names></name> <name><surname>Contreras</surname><given-names>M.</given-names></name> <name><surname>Macedo</surname><given-names>I.</given-names></name> <name><surname>Andreu-Perez</surname><given-names>J.</given-names></name></person-group> (<year>2024</year>). <article-title>Transfer learning with active sampling for rapid training and calibration in BCI-P300 across health states and multi-Centre data</article-title>. <source>IEEE Trans. Neural Syst. Rehabil. Eng.</source> <volume>32</volume>:<fpage>794</fpage>&#x2013;<lpage>3803</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TNSRE.2024.3420960</pub-id>, <pub-id pub-id-type="pmid">38949927</pub-id></mixed-citation></ref>
<ref id="ref16"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ghinoiu</surname><given-names>B.</given-names></name> <name><surname>Vl&#x0103;d&#x0103;reanu</surname><given-names>V.</given-names></name> <name><surname>Travediu</surname><given-names>A. M.</given-names></name> <name><surname>Vl&#x0103;d&#x0103;reanu</surname><given-names>L.</given-names></name> <name><surname>Pop</surname><given-names>A.</given-names></name> <name><surname>Feng</surname><given-names>Y.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>EEG-based mobile robot control using deep learning and ROS integration</article-title>. <source>Technologies</source> <volume>12</volume>:<fpage>261</fpage>. doi: <pub-id pub-id-type="doi">10.3390/technologies12120261</pub-id></mixed-citation></ref>
<ref id="ref17"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Han</surname><given-names>C.</given-names></name></person-group> (<year>2025</year>). <article-title>Mechanism of neural oscillations and their relationship with multiple cognitive functions and mental disorders</article-title>. <source>Front. Neurosci.</source> <volume>18</volume>:<fpage>1543731</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2024.1543731</pub-id></mixed-citation></ref>
<ref id="ref18"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ivakhnenko</surname><given-names>A. G.</given-names></name></person-group> (<year>1971</year>). <article-title>Polynomial theory of complex systems</article-title>. <source>IEEE Trans. Syst. Man Cybern.</source> <volume>SMC-1</volume>:<fpage>364e378</fpage>.</mixed-citation></ref>
<ref id="ref19"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jon</surname><given-names>H. J.</given-names></name> <name><surname>Jin</surname><given-names>L.</given-names></name> <name><surname>Jung</surname><given-names>H.</given-names></name> <name><surname>Kim</surname><given-names>H.</given-names></name> <name><surname>Kim</surname><given-names>E. Y.</given-names></name></person-group> (<year>2024</year>). <article-title>EEG-RegNet: regressive emotion recognition in continuous VAD space using EEG signals</article-title>. <source>Mathematics</source> <volume>13</volume>:<fpage>87</fpage>. doi: <pub-id pub-id-type="doi">10.3390/math13010087</pub-id></mixed-citation></ref>
<ref id="ref20"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kalauzi</surname><given-names>A.</given-names></name> <name><surname>Mati&#x0107;</surname><given-names>Z. A.</given-names></name> <name><surname>Suljovruji&#x0107;</surname><given-names>E.</given-names></name> <name><surname>Boji&#x0107;</surname><given-names>T.</given-names></name></person-group> (<year>2025</year>). <article-title>Detection of respiratory frequency rhythm in human alpha phase shifts: topographic distributions in wake and drowsy states</article-title>. <source>Front. Physiol.</source> <volume>15</volume>:<fpage>1511998</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fphys.2024.1511998</pub-id>, <pub-id pub-id-type="pmid">39835197</pub-id></mixed-citation></ref>
<ref id="ref21"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kansal</surname><given-names>S.</given-names></name> <name><surname>Garg</surname><given-names>D.</given-names></name> <name><surname>Upadhyay</surname><given-names>A.</given-names></name> <name><surname>Mittal</surname><given-names>S.</given-names></name> <name><surname>Talwar</surname><given-names>G. S.</given-names></name></person-group> (<year>2023</year>). <article-title>E.E.G. Dl-Amput, design and development of the low-cost prosthesis for rehabilitation of upper limb amputees using deep-learning-based techniques</article-title>. <source>Eng. Appl. Artif. Intell.</source> <volume>126</volume>:<fpage>106990</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.engappai.2023.106990</pub-id></mixed-citation></ref>
<ref id="ref22"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Karnam</surname><given-names>N. K.</given-names></name> <name><surname>Dubey</surname><given-names>S. R.</given-names></name> <name><surname>Turlapaty</surname><given-names>A. C.</given-names></name> <name><surname>Gokaraju</surname><given-names>B.</given-names></name></person-group> (<year>2022</year>). <article-title>EMGHandNet: a hybrid CNN and bi-LSTM architecture for hand activity classification using surface EMG signals</article-title>. <source>Biocybern. Biomed. Eng.</source> <volume>42</volume>, <fpage>325</fpage>&#x2013;<lpage>340</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bbe.2022.02.005</pub-id></mixed-citation></ref>
<ref id="ref23"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname><given-names>R.</given-names></name> <name><surname>Ren</surname><given-names>C.</given-names></name> <name><surname>Ge</surname><given-names>Y.</given-names></name> <name><surname>Zhao</surname><given-names>Q.</given-names></name> <name><surname>Yang</surname><given-names>Y.</given-names></name> <name><surname>Shi</surname><given-names>Y.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>MTLFuseNet: a novel emotion recognition model based on deep latent feature fusion of EEG signals and multi-task learning</article-title>. <source>Knowl.-Based Syst.</source> <volume>276</volume>:<fpage>110756</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.knosys.2023.110756</pub-id></mixed-citation></ref>
<ref id="ref24"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname><given-names>M.</given-names></name> <name><surname>Zeng</surname><given-names>X.</given-names></name> <name><surname>Wu</surname><given-names>F.</given-names></name> <name><surname>Chu</surname><given-names>Y.</given-names></name> <name><surname>Wei</surname><given-names>W.</given-names></name> <name><surname>Fan</surname><given-names>M.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Build a bridge between ECG and EEG signals for atrial fibrillation diagnosis using AI methods</article-title>. <source>Comput. Biol. Med.</source> <volume>166</volume>:<fpage>107429</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compbiomed.2023.107429</pub-id></mixed-citation></ref>
<ref id="ref25"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Maher</surname><given-names>A.</given-names></name> <name><surname>Mian Qaisar</surname><given-names>S.</given-names></name> <name><surname>Salankar</surname><given-names>N.</given-names></name> <name><surname>Jiang</surname><given-names>F.</given-names></name> <name><surname>Tadeusiewicz</surname><given-names>R.</given-names></name> <name><surname>P&#x0142;awiak</surname><given-names>P.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Hybrid EEG-fNIRS brain-computer interface based on the non-linear features extraction and stacking ensemble learning</article-title>. <source>Biocybern. Biomed. Eng.</source> <volume>43</volume>, <fpage>463</fpage>&#x2013;<lpage>475</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bbe.2023.05.001</pub-id></mixed-citation></ref>
<ref id="ref26"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mak</surname><given-names>J. N.</given-names></name> <name><surname>McFarland</surname><given-names>D. J.</given-names></name> <name><surname>Vaughan</surname><given-names>T. M.</given-names></name> <name><surname>McCane</surname><given-names>L. M.</given-names></name> <name><surname>Tsui</surname><given-names>P. Z.</given-names></name> <name><surname>Zeitlin</surname><given-names>D. J.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>EEG correlates of P300-based brain&#x2013;computer interface (BCI) performance in people with amyotrophic lateral sclerosis</article-title>. <source>J. Neural Eng.</source> <volume>9</volume>:<fpage>026014</fpage>. doi: <pub-id pub-id-type="doi">10.1088/1741-2560/9/2/026014</pub-id>, <pub-id pub-id-type="pmid">22350501</pub-id></mixed-citation></ref>
<ref id="ref27"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Panthadas</surname><given-names>H.</given-names></name> <name><surname>Bhuiyan</surname><given-names>M. I. H.</given-names></name></person-group> (<year>2024</year>). &#x201C;<article-title>HFD-CCA for SSVEP classification in Mobile environment</article-title>&#x201D; in <source>2024 IEEE international conference on electronics, computing and communication technologies (CONECCT)</source> (<publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>6</lpage>.</mixed-citation></ref>
<ref id="ref28"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Sarma</surname><given-names>A.</given-names></name> <name><surname>Thamilarasu</surname><given-names>G.</given-names></name></person-group> (<year>2024</year>). &#x201C;<article-title>SCRAPE: Side-Channel attack to reveal alphanumeric passwords from EEG signals</article-title>&#x201D; in <source>2024 international conference on cyber-enabled distributed computing and knowledge discovery (CyberC)</source> (<publisher-name>IEEE</publisher-name>), <fpage>139</fpage>&#x2013;<lpage>145</lpage>.</mixed-citation></ref>
<ref id="ref29"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Schirrmeister</surname><given-names>R. T.</given-names></name></person-group> (<year>2017</year>). <source>High gamma dataset (HGD)</source>: <publisher-name>GitHub repository</publisher-name>.</mixed-citation></ref>
<ref id="ref30"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sharma</surname><given-names>N.</given-names></name> <name><surname>Kolekar</surname><given-names>M. H.</given-names></name> <name><surname>Jha</surname><given-names>K.</given-names></name></person-group> (<year>2021</year>). <article-title>EEG based dementia diagnosis using multi-class support vector machine with motor speed cognitive test</article-title>. <source>Biomed. Signal Process. Control</source> <volume>63</volume>:<fpage>102102</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bspc.2020.102102</pub-id></mixed-citation></ref>
<ref id="ref31"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sharma</surname><given-names>N.</given-names></name> <name><surname>Sharma</surname><given-names>M.</given-names></name> <name><surname>Singhal</surname><given-names>A.</given-names></name> <name><surname>Fatema</surname><given-names>N.</given-names></name> <name><surname>Jadoun</surname><given-names>V. K.</given-names></name> <name><surname>Malik</surname><given-names>H.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>A spatiotemporal feature extraction technique using superlet-CNN fusion for improved motor imagery classification</article-title>. <source>IEEE Access.</source> <volume>13</volume>, <fpage>2141</fpage>&#x2013;<lpage>2151</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2024.3517639</pub-id></mixed-citation></ref>
<ref id="ref32"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shi</surname><given-names>W.-T.</given-names></name> <name><surname>Lyu</surname><given-names>Z.-J.</given-names></name> <name><surname>Tang</surname><given-names>S.-T.</given-names></name> <name><surname>Chia</surname><given-names>T.-L.</given-names></name> <name><surname>Yang</surname><given-names>C.-Y.</given-names></name></person-group> (<year>2018</year>). <article-title>A bionic hand controlled by hand gesture recognition based on surface EMG signals: a preliminary study</article-title>. <source>Biocybern. Biomed. Eng.</source> <volume>38</volume>, <fpage>126</fpage>&#x2013;<lpage>135</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bbe.2017.11.001</pub-id></mixed-citation></ref>
<ref id="ref33"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Singh</surname><given-names>K.</given-names></name> <name><surname>Singha</surname><given-names>N.</given-names></name> <name><surname>Jaswal</surname><given-names>G.</given-names></name> <name><surname>Bhalaik</surname><given-names>S.</given-names></name></person-group> (<year>2024</year>). <article-title>A novel CNN with sliding window technique for enhanced classification of MI-EEG sensor data</article-title>. <source>IEEE Sensors J.</source> <fpage>1</fpage>&#x2013;<fpage>1</fpage>. doi: <pub-id pub-id-type="doi">10.1109/JSEN.2024.3515252</pub-id></mixed-citation></ref>
<ref id="ref34"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tuncer</surname><given-names>E.</given-names></name> <name><surname>Bolat</surname><given-names>E. D.</given-names></name></person-group> (<year>2022</year>). <article-title>Channel based epilepsy seizure type detection from electroencephalography (EEG) signals with machine learning techniques</article-title>. <source>Biocybern. Biomed. Eng.</source> <volume>42</volume>, <fpage>575</fpage>&#x2013;<lpage>595</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bbe.2022.04.004</pub-id></mixed-citation></ref>
<ref id="ref35"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tuncer</surname><given-names>T.</given-names></name> <name><surname>Dogan</surname><given-names>S.</given-names></name> <name><surname>Subasi</surname><given-names>A.</given-names></name></person-group> (<year>2020</year>). <article-title>Surface EMG signal classification using ternary pattern and discrete wavelet transform based feature extraction for hand movement recognition</article-title>. <source>Biomed. Signal Process. Control</source> <volume>58</volume>:<fpage>101872</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bspc.2020.101872</pub-id></mixed-citation></ref>
<ref id="ref36"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yousefipour</surname><given-names>B.</given-names></name> <name><surname>Rajabpour</surname><given-names>V.</given-names></name> <name><surname>Abdoljabbari</surname><given-names>H.</given-names></name> <name><surname>Sheykhivand</surname><given-names>S.</given-names></name> <name><surname>Danishvar</surname><given-names>S.</given-names></name></person-group> (<year>2024</year>). <article-title>An ensemble deep learning approach for EEG-based emotion recognition using multi-class CSP</article-title>. <source>Biomimetics</source> <volume>9</volume>:<fpage>761</fpage>. doi: <pub-id pub-id-type="doi">10.3390/biomimetics9120761</pub-id>, <pub-id pub-id-type="pmid">39727765</pub-id></mixed-citation></ref>
<ref id="ref37"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Ziegelman</surname><given-names>L.</given-names></name> <name><surname>Hernandez</surname><given-names>M. E.</given-names></name></person-group> (<year>2024</year>). &#x201C;<article-title>Application of a neural ODE to classify motion control strategy using EEG</article-title>&#x201D; in <source>2024 46th annual international conference of the IEEE engineering in medicine and biology society (EMBC)</source> (<publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>4</lpage>.</mixed-citation></ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/621567/overview">Marta Molinas</ext-link>, Norwegian University of Science and Technology, Trondheim, Norway</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3191972/overview">Shuojin Yang</ext-link>, Tsinghua University, China</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3318365/overview">Kamal Singh</ext-link>, National Institute of Technology Delhi, India</p>
</fn>
</fn-group>
</back>
</article>