<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Comput. Neurosci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Computational Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Comput. Neurosci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1662-5188</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fncom.2026.1731812</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Metaheuristic-driven dual-layer model for classifying Alzheimer&#x00027;s disease stages</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Anicin</surname> <given-names>Luka</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<uri xlink:href="https://loop.frontiersin.org/people/3300725"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Andjelic</surname> <given-names>Svetlana</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Markovic Blagojevic</surname> <given-names>Marija</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<uri xlink:href="https://loop.frontiersin.org/people/3219061"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Bulaja</surname> <given-names>Dejan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Zivkovic</surname> <given-names>Miodrag</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<uri xlink:href="https://loop.frontiersin.org/people/2184974"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Zivkovic</surname> <given-names>Tamara</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<uri xlink:href="https://loop.frontiersin.org/people/3360656"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Antonijevic</surname> <given-names>Milos</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<uri xlink:href="https://loop.frontiersin.org/people/2249150"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Bacanin</surname> <given-names>Nebojsa</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<uri xlink:href="https://loop.frontiersin.org/people/1573164"/>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Faculty of Informatics and Computing, Singidunum University</institution>, <city>Belgrade</city>, <country country="RS">Serbia</country></aff>
<aff id="aff2"><label>2</label><institution>Saveetha School of Engineering, Saveetha Institute of Medical and Technical Sciences (SIMATS), Thandalam, Chennai</institution>, <city>Tamilnadu</city>, <country country="IN">India</country></aff>
<author-notes>
<corresp id="c001"><label>&#x0002A;</label>Correspondence: Nebojsa Bacanin, <email xlink:href="mailto:nbacanin@singidunum.ac.rs">nbacanin@singidunum.ac.rs</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-03">
<day>03</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>20</volume>
<elocation-id>1731812</elocation-id>
<history>
<date date-type="received">
<day>24</day>
<month>10</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>28</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>07</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2026 Anicin, Andjelic, Markovic Blagojevic, Bulaja, Zivkovic, Zivkovic, Antonijevic and Bacanin.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Anicin, Andjelic, Markovic Blagojevic, Bulaja, Zivkovic, Zivkovic, Antonijevic and Bacanin</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-03">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Accurate determination of the progression phase of Alzheimer&#x00027;s disease (AD) is crucial for timely clinical decision-making, improved patient management, and personalized therapeutic interventions. However, reliably distinguishing between multiple disease stages using neuroimaging data remains a challenging task.</p></sec>
<sec>
<title>Methods</title>
<p>This study proposes an advanced machine learning framework for multi-stage AD classification using magnetic resonance imaging (MRI) data. The architecture follows a two-tier design. In the first stage, convolutional neural networks (CNNs) are employed to extract deep and discriminative feature representations from MRI images. In the second stage, these features are classified using ensemble learning models, specifically XGBoost and LightGBM. Metaheuristic optimization strategies are applied to further enhance model performance. The proposed framework was evaluated using a publicly available Alzheimer&#x00027;s disease dataset under three different experimental configurations.</p></sec>
<sec>
<title>Results</title>
<p>Experimental results demonstrate that the proposed approach effectively addresses the multi-class classification problem across different AD progression stages. The optimized models achieved a maximum classification accuracy of 89.55%, indicating robust predictive performance and strong generalization capability.</p></sec>
<sec>
<title>Discussion</title>
<p>To improve transparency and clinical relevance, explainable artificial intelligence (XAI) techniques were incorporated to interpret model predictions and highlight feature importance. The results provide meaningful insights into neuroimaging biomarkers associated with AD progression and support the development of more interpretable and trustworthy diagnostic systems. Overall, the proposed framework contributes to improved data-driven decision support and offers a promising direction for future Alzheimer&#x00027;s disease diagnosis and staging research.</p></sec></abstract>
<kwd-group>
<kwd>Alzheimer&#x00027;s disease</kwd>
<kwd>convolutional neural networks</kwd>
<kwd>LightGBM</kwd>
<kwd>machine learning</kwd>
<kwd>metaheuristics algorithms</kwd>
<kwd>MRI</kwd>
<kwd>variable neighborhood search</kwd>
<kwd>XGBoost</kwd>
</kwd-group>
<funding-group>
  <funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This research was supported by the Science Fund of the Republic of Serbia, Grant No. 7373, characterizing crises-caused air pollution alternations using an artificial intelligence-based framework (crAIRsis) and Grant No. 7502, Intelligent Multi-Agent Control and Optimization applied to Green Buildings and Environmental Monitoring Drone Swarms (ECOSwarm).</funding-statement>
</funding-group>
<counts>
<fig-count count="5"/>
<table-count count="10"/>
<equation-count count="6"/>
<ref-count count="82"/>
<page-count count="25"/>
<word-count count="15437"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value></meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Alzheimer&#x00027;s disease (AD) is one of the most debilitating neurodegenerative diseases of modern times, progressively affecting memory, cognition, and the ability to perform routine tasks. Data from the World Health Organization indicate that approximately 57 million people worldwide currently live with dementia, with AD accounting for nearly 60%&#x02013;70% of all reported cases. Each year, millions of new diagnoses are registered<xref ref-type="fn" rid="fn0003"><sup>1</sup></xref> (<xref ref-type="bibr" rid="B60">Rajan et al., 2021</xref>). These statistics emphasize the urgent need for more effective techniques capable of identifying the disease in its earliest phases and tracking its evolution with improved accuracy and consistency (<xref ref-type="bibr" rid="B79">Zhang et al., 2025</xref>).</p>
<p>Neuropathological evidence suggests that AD-associated brain degeneration can begin in midlife, although clinical manifestations typically emerge after the age of 65. As the global elderly population continues to expand, the incidence of AD is increasing at an alarming rate (<xref ref-type="bibr" rid="B69">Tajahmadi et al., 2023</xref>). Present diagnostic practices combine neurological assessments, cognitive and psychometric evaluations, neuroimaging modalities such as magnetic resonance imaging (MRI) and positron emission tomography (PET), together with cerebrospinal fluid and blood biomarker testing. However, these approaches are often expensive, time-consuming, and inefficient, revealing the pressing demand for more rapid and reliable diagnostic methodologies. The difficulty is particularly evident when trying to recognize the early onset of AD or mild cognitive impairment (MCI), where precise detection remains both challenging and essential to enable timely therapeutic interventions, preventive measures, and support mechanisms that aid patients and caregivers (<xref ref-type="bibr" rid="B56">Prasath and Sumathi, 2023</xref>).</p>
<p>Reliable classification of AD progression requires an exact evaluation of cerebral morphology, particularly through volumetric measurements. Although manual segmentation techniques can produce high precision, they are extremely tedious and unfeasible for large-scale implementation, leading to a transition to automated computational strategies for both clinical and research environments (<xref ref-type="bibr" rid="B24">Guenette et al., 2018</xref>). Within this framework, the adoption of artificial intelligence (AI) in healthcare has substantially improved diagnostic accuracy, enhanced treatment planning, and facilitated more effective healthcare delivery by reducing expenses and improving patient outcomes (<xref ref-type="bibr" rid="B61">Raza et al., 2025</xref>). As a result, AD categorization has become a vibrant area of scientific inquiry over the past decade. A considerable number of contemporary studies focus on deep learning (DL), most notably convolutional neural networks (CNNs) (<xref ref-type="bibr" rid="B23">Gu et al., 2018</xref>), which have become the dominant architecture, while others employ more classical machine learning (ML) techniques. CNNs have exhibited remarkable capability to capture distinctive patterns from neuroimaging data such as magnetic resonance imaging and PET. Similarly, gradient boosting algorithms including AdaBoost (<xref ref-type="bibr" rid="B27">Hastie et al., 2009</xref>) and CatBoost (<xref ref-type="bibr" rid="B57">Prokhorenkova et al., 2018</xref>) have achieved competitive results when working with structured datasets. Numerous investigations have reported impressive success using CNNs for AD stage prediction (<xref ref-type="bibr" rid="B66">Shamrat et al., 2023</xref>; <xref ref-type="bibr" rid="B18">Degadwala et al., 2023</xref>), while others have explored alternative ML paradigms (<xref ref-type="bibr" rid="B51">Nawaz et al., 2021</xref>).</p>
<p>Despite their broad application, ML-driven models face several fundamental obstacles. Their performance can severely degrade due to issues like biased or low-quality data, suboptimal algorithm selection, and inadequate hyperparameter adjustment. Models trained on imbalanced, noisy, or poorly curated datasets often produce erratic and unreliable output, underscoring the importance of developing high-quality and representative training collections. Furthermore, the efficiency of each ML algorithm is inherently context-dependent, and individual models may demonstrate drastically different effectiveness depending on the specific dataset and problem domain. Hyperparameters also play a critical role, since determining their ideal configurations demands systematic and often computationally demanding fine-tuning. This difficulty aligns with Wolpert&#x00027;s no free lunch (NFL) theorem (<xref ref-type="bibr" rid="B74">Wolpert and Macready, 1997</xref>), which asserts that no single algorithm outperforms all others across all types of problems. Consequently, each method must be adapted and optimized for each specific application. Nevertheless, the hyperparameter optimization process itself is notoriously intricate and is generally regarded as NP-hard. As both data complexity and the dimensionality of the search space increase, identifying near-optimal configurations becomes computationally burdensome and frequently infeasible. Traditional optimization approaches often fail to yield satisfactory results under such challenging circumstances.</p>
<p>To overcome these limitations, metaheuristic optimization techniques have emerged as a potent alternative. These algorithms are particularly proficient in traversing vast and complex search landscapes to approximate optimal solutions when exact optimization becomes computationally unattainable. Due to their flexibility and effectiveness, metaheuristics are especially advantageous for hyperparameter tuning. By producing high-quality approximations, they substantially improve the performance and robustness of ML models across a broad range of practical domains.</p>
<p>To address the challenge of categorization of the AD stage, this research proposes a novel dual-layered framework inspired by methodologies that have demonstrated outstanding results in domains such as software testing (<xref ref-type="bibr" rid="B53">Petrovic et al., 2024</xref>; <xref ref-type="bibr" rid="B71">Villoth J. P. et al., 2025</xref>), intrusion detection (<xref ref-type="bibr" rid="B4">Antonijevic et al., 2025</xref>), and web security improvement (<xref ref-type="bibr" rid="B29">Jovanovic et al., 2023</xref>). In the first stage, a CNN is used to extract distinctive and meaningful features from the MRI scans. Building upon earlier studies (<xref ref-type="bibr" rid="B53">Petrovic et al., 2024</xref>; <xref ref-type="bibr" rid="B71">Villoth J. P. et al., 2025</xref>) that showed how replacing the final dense layer of CNNs with advanced ensemble learners can considerably improve model accuracy, the proposed framework substitutes this concluding layer with XGBoost (<xref ref-type="bibr" rid="B14">Chen and Guestrin, 2016</xref>) and LightGBM (<xref ref-type="bibr" rid="B33">Ke et al., 2017</xref>) classifiers.</p>
<p>Rather than relying exclusively on CNN-based end-to-end classification, the proposed architecture leverages the convolutional layers for hierarchical feature abstraction, after which the obtained deep representations are passed to a secondary classification phase handled by ensemble algorithms. To further enhance the overall performance of the model, metaheuristic optimization techniques are integrated to fine-tune hyperparameters at both levels, ensuring optimal adjustment of the CNN feature extractor and the ensemble classifiers. This hybrid architecture combines CNN ability to capture complex features, the robust decision fusion capacity of gradient boosting models, and the adaptive exploration efficiency of metaheuristics. The synergy achieved through this integration of deep learning, ensemble-based classification, and intelligent hyperparameter optimization results in improved predictive performance and increased computational efficiency in AD stage detection.</p>
<p>In the proposed model, hyperparameter tuning is carried out through a custom variation of the well-known variable neighborhood search (VNS) algorithm (<xref ref-type="bibr" rid="B49">Mladenovi&#x00107; and Hansen, 1997</xref>). The selection of VNS followed extensive comparative experiments involving multiple optimization techniques, consistent with the rationale of the NFL theorem (<xref ref-type="bibr" rid="B74">Wolpert and Macready, 1997</xref>), which states that no single optimizer consistently outperforms all others in every class of problems. Although several other state-of-the-art metaheuristic approaches were also tested, preliminary experiments on smaller AD classification datasets indicated that VNS consistently achieved stable and high-quality solutions. These findings highlighted the robustness, adaptability, and suitability of the algorithm for complex optimization landscapes, motivating its implementation as the primary optimization mechanism in this study. By adapting VNS to the specific needs of AD stage prediction, the framework achieves more efficient tuning of both CNN and ensemble components, leading to superior predictive accuracy and higher reliability of the system.</p>
<p>Moreover, this research fills an important methodological gap, as the integration of CNN-based feature extraction with gradient boosting classifiers within a coordinated, multi-tiered framework refined through advanced metaheuristic optimization has not yet been systematically explored for this particular task. Taking into consideration all these aspects, the main methodological innovations and novel contributions of this work can be summarized as follows:</p>
<list list-type="bullet">
<list-item><p>Development of a hybrid AI-based analytical framework that combines feature extraction, deep learning, and conventional machine learning methods, specifically designed for accurate classification of Alzheimer&#x00027;s disease stages based on MRI-derived data.</p></list-item>
<list-item><p>Construction of a two-phase classification strategy in which CNNs are used to hierarchically extract deep neuroimaging features, which are subsequently refined through classical ML algorithms to achieve precise differentiation among AD stages.</p></list-item>
<list-item><p>Implementation of computationally efficient models that utilize lightweight CNN architectures coupled with shallow XGBoost and LightGBM classifiers, each optimized with minimal hyperparameter complexity, thus allowing potential deployment in low-resource settings such as embedded systems and portable diagnostic platforms.</p></list-item>
<list-item><p>Formulation of a customized optimization method inspired by the standard VNS algorithm, specifically adapted to systematically fine-tune the network and classifier parameters, thus improving the classification precision at both hierarchical levels of the proposed system.</p></list-item>
<list-item><p>Incorporation of explainable artificial intelligence (XAI) techniques to ensure transparent interpretation of the model&#x00027;s decision-making process, focusing on feature importance and contribution analysis.</p></list-item>
</list>
<p>The remainder of this paper is organized as follows. Section 2 introduces the fundamental theoretical background and reviews the principal methodological paradigms that serve as the basis for the proposed framework. Section 3 provides a detailed description of the algorithmic design and explains the two-stage classification approach developed to identify the progression of AD using MRI data. Section 4 outlines the complete experimental configuration, including all parameter settings necessary to guarantee full reproducibility. Section 5 reports the empirical results obtained from the experiments conducted, while Section 6 presents a comprehensive statistical assessment and interpretive discussion of these results. Finally, Section 7 summarizes the main contributions of this study and suggests possible directions for future research within the field.</p></sec>
<sec id="s2">
<label>2</label>
<title>Related works</title>
<p>AD is a progressive neurodegenerative condition characterized by a steady deterioration in memory, cognitive performance, and behavioral control. Early and accurate detection of AD is widely regarded as fundamental for the successful clinical management and design of targeted therapeutic interventions (<xref ref-type="bibr" rid="B68">Singh et al., 2024</xref>). During the past decade, ML and DL techniques have emerged as transformative approaches to identify and stage AD, utilizing multimodal sources such as magnetic resonance imaging, PET, and biochemical biomarkers. Recent computational and electrophysiological studies have contributed substantive insights into AD mechanisms relevant to this problem. For example, <xref ref-type="bibr" rid="B32">Kaushik et al. (2024)</xref> developed a computational model of hippocampal pyramidal neurons to investigate how &#x003B2;-amyloid-induced disruptions in calcium-dependent ionic channels affect theta rhythm dynamics, linking ionic dysregulation to functional impairment in memory-related neural circuits. Complementing such modeling approaches, studies like (<xref ref-type="bibr" rid="B6">Babiloni et al., 2020</xref>; <xref ref-type="bibr" rid="B78">Yu et al., 2021</xref>; <xref ref-type="bibr" rid="B16">Costanzo et al., 2024</xref>) reviewed the role of electrophysiological biomarkers, including EEG and MEG, in characterizing neural synchronization and connectivity changes associated with Alzheimer&#x00027;s pathology, underscoring the value of real-time neurophysiological measurements for understanding disease progression and potential diagnostic markers.</p>
<p>A growing body of literature underscores the value of hybrid analytical frameworks and feature-driven deep models, which have significantly improved diagnostic accuracy (<xref ref-type="bibr" rid="B15">Chen et al., 2022</xref>). For example, <xref ref-type="bibr" rid="B5">Arya et al. (2023)</xref> conducted an extensive review of ML and DL-based approaches to differentiate cognitively normal individuals from AD patients in the early stages of the disease. Their findings identified MRI and PET as the most commonly applied imaging modalities and compared classification performance between various algorithms. Similarly, <xref ref-type="bibr" rid="B80">Zhao et al. (2023)</xref> analyzed the comparative effectiveness of traditional ML methods for the prediction of AD using MRI data. Their evaluation included support vector machines, random forests, CNNs, autoencoders, and transformer-based models, addressing trade-offs between preprocessing pipelines, conventional ML methods, and modern DL architectures. In addition, they discussed the advantages and limitations of different input representations, offering valuable insights into the development of more effective AD diagnostic models.</p>
<p>In another example, <xref ref-type="bibr" rid="B28">Helaly et al. (2022)</xref> introduced a framework aimed at the early detection and stage-specific categorization of AD from medical images. Their method employed CNNs to perform pairwise binary classifications between AD stages, effectively decomposing the multi-class classification problem into smaller binary tasks. Two methodological configurations were analyzed: one used standard CNN models to process both 2D and 3D neuroimaging data, while the other leveraged transfer learning with pre-trained networks such as VGG19 to enhance prediction accuracy. In a complementary direction, <xref ref-type="bibr" rid="B63">Sarkar (2025)</xref> explored the integration of deep learning with gait analysis to improve diagnostic robustness. They combined CNNs and recurrent neural networks (RNNs) to differentiate between cognitively healthy individuals and those at risk using motion data collected from wearable sensors and motion capture technologies, highlighting the potential of non-invasive, movement-based biomarkers in early AD detection.</p>
<p>A continuing issue in DL-based AD diagnostics is their tendency to operate as opaque black box systems, producing outputs without clear interpretability. To confront this limitation, <xref ref-type="bibr" rid="B10">Bloch et al. (2024)</xref> conducted a systematic investigation to improve the transparency of the model by identifying the neuroanatomical regions activated during inference and comparing these with the interpretability output of traditional ML models. Their work used a wide range of explainability techniques, providing a thorough assessment of interpretability within AD diagnostic systems. Similarly, <xref ref-type="bibr" rid="B45">Menagadevi et al. (2024)</xref> stressed the critical role of preprocessing and image enhancement in increasing classification accuracy. Their review discussed key MRI preprocessing steps such as denoising, illumination normalization, and intensity correction, followed by segmentation techniques to isolate regions of interest, feature extraction methods, and the application of various ML and DL algorithms for AD classification, thus presenting a comprehensive methodological overview from data preparation to classification.</p>
<p>Beyond the binary challenge of distinguishing the presence of AD, stratification of disease progression stages has become a prominent research focus. Both deep learning and conventional ML techniques generally require large datasets to form stable feature representations; however, this necessity introduces issues such as overfitting and class imbalance. To mitigate these challenges, several studies have adopted transfer learning and hybrid modeling strategies. For example, <xref ref-type="bibr" rid="B51">Nawaz et al. (2021)</xref> developed a deep feature-based AD staging approach, where features extracted from a pre-trained AlexNet model were subsequently classified using traditional ML algorithms like random forests, k-nearest neighbors and support vector machines. Similarly, <xref ref-type="bibr" rid="B52">Nguyen et al. (2022)</xref> proposed an ensemble model that merged deep and traditional learning, employing a 3D-ResNet to capture volumetric MRI patterns and an XGBoost classifier to identify discriminative voxel-level signals. Another approach was presented in <xref ref-type="bibr" rid="B43">Mahanty et al. (2024b)</xref>, where the authors developed an ensemble DL approach using an enhanced Xception model and snapshot blending to achieve highly accurate multi-class AD detection from brain MRI scans. Transfer-learning models were examined in <xref ref-type="bibr" rid="B42">Mahanty et al. (2024a)</xref> to classify AD from medical imaging data, demonstrating improved detection performance compared to individual models.</p>
<p>Further advancing this direction, <xref ref-type="bibr" rid="B21">El-Sappagh et al. (2022)</xref> introduced a two-phase multimodal DL framework to track AD progression. The first stage used multiclass classification to assign diagnostic labels, while the second applied regression analysis to estimate the time-to-conversion from mild cognitive impairment (MCI) to AD, providing both categorical and temporal insight. Building on that work, <xref ref-type="bibr" rid="B20">El-Assy et al. (2024)</xref> presented a CNN-based system trained on MRI scans that utilized two separate CNN branches with distinct kernel dimensions and pooling strategies, integrated through a shared output layer to facilitate multi-class categorization across three to five disease stages.</p>
<p>Additional research has focused on refining CNN architectures for more granular disease stratification. <xref ref-type="bibr" rid="B64">Sava&#x0015F; (2022)</xref> evaluated 29 pre-trained CNN networks to classify MRI scans into three categories: cognitively normal, moderate cognitive impairment, and AD. Extending these findings, <xref ref-type="bibr" rid="B66">Shamrat et al. (2023)</xref> developed AlzheimerNet, a specialized CNN architecture capable of differentiating between five stages of AD in addition to a control group. Their approach incorporated contrast limited adaptive histogram equalization (CLAHE) to improve MRI image quality prior to classification. Finally, <xref ref-type="bibr" rid="B22">Givian et al. (2024)</xref> proposed a feature-based ML framework that employs structural MRI features with several classifiers, including random forests, k-nearest neighbors, support vector machines, decision trees, and multilayer perceptrons, to segment disease phases, thus offering comparative insights into the respective strengths of traditional ML and deep feature-based methods.</p>
<sec>
<label>2.1</label>
<title>Technology background</title>
<p>CNNs (<xref ref-type="bibr" rid="B23">Gu et al., 2018</xref>) have become one of the most transformative architectures in artificial intelligence, largely due to their outstanding capabilities in image classification, pattern recognition, and object detection. Over time, their use has expanded far beyond visual perception, extending into fields such as natural language processing, biomedical imaging, and environmental modeling. The conceptual basis of CNNs draws inspiration from the hierarchical organization of the mammalian visual cortex, in which sensory information is processed through successive layers of increasing abstraction. In artificial models, this hierarchical mechanism is reproduced as the data move through multiple interconnected layers, where nonlinear activation functions, such as the rectified linear unit (ReLU), hyperbolic tangent (tanh), and sigmoid, allow the network to model complex nonlinear relationships among features.</p>
<p>A typical deep CNN is composed of several distinct types of layers: convolutional, activation, pooling, and fully connected layers. In the convolutional layer, a set of trainable filters (kernels) systematically traverse the input, performing localized dot-product computations between filter weights and corresponding input regions. The result is a group of feature maps that capture local patterns and spatial hierarchies. These feature maps are subsequently passed through activation layers, which introduce the nonlinearity necessary for learning complex dependencies. Among all activation functions, ReLU is the most widely used due to its computational simplicity and effectiveness in mitigating the vanishing gradient issue (<xref ref-type="bibr" rid="B50">Nair and Hinton, 2010</xref>).</p>
<p>The pooling layers perform spatial subsampling to reduce the dimensionality of the feature map while preserving the most important information. Max pooling, the most common approach, selects the highest value within each neighborhood, usually achieving a reduction of 70% to 80% in dimensionality without a considerable loss of relevant information. The abstract, high-level features obtained after a series of convolutional and pooling stages are finally processed by fully connected layers, which act as the decision-making component of the network, transforming learned features into the final class predictions.</p>
<p>CNNs have shown exceptional flexibility in a wide range of computer vision tasks (<xref ref-type="bibr" rid="B9">Bhatt et al., 2021</xref>), including face recognition (<xref ref-type="bibr" rid="B13">Budiman et al., 2023</xref>), document and handwriting analysis (<xref ref-type="bibr" rid="B26">Hasib et al., 2023</xref>), and medical image classification for diagnosis of diseases and clinical screening (<xref ref-type="bibr" rid="B62">Salehi et al., 2023</xref>; <xref ref-type="bibr" rid="B58">Purkovic et al., 2024</xref>). Beyond medical applications, CNN architectures have also been used successfully in climate and environmental studies, particularly to model global weather dynamics and predict extreme meteorological events (<xref ref-type="bibr" rid="B31">Kareem et al., 2021</xref>).</p>
<p>XGBoost (Extreme Gradient Boosting) is a machine learning algorithm based on high-performance gradient boosting (<xref ref-type="bibr" rid="B14">Chen and Guestrin, 2016</xref>). It constructs an ensemble of decision trees in a sequential manner, where each new tree corrects the errors of the preceding one, resulting in enhanced predictive precision and model robustness. Recognized for its scalability and speed, XGBoost incorporates regularization to reduce overfitting and supports parallelized learning, making it well-suited for large, high-dimensional datasets. Its adaptability allows it to handle both classification and regression tasks, with several tunable hyperparameters that significantly affect performance. Thanks to its efficiency, reliability, and interpretability, XGBoost is widely adopted in cybersecurity, IoT data analytics, and other real-world applications that require fast and accurate data-driven predictions.</p>
<p>LightGBM (<xref ref-type="bibr" rid="B33">Ke et al., 2017</xref>), an open-source framework developed by Microsoft, is specifically designed for large-scale high-speed data processing. Its efficiency arises from techniques such as Gradient-based One-Side Sampling (GOSS), which preserves samples with larger gradient magnitudes to maintain accuracy, and Exclusive Feature Bundling (EFB), which combines mutually exclusive features to reduce dimensionality and computational load. These mechanisms allow LightGBM to train significantly faster and with lower memory consumption than traditional boosting algorithms, making it highly effective for massive datasets with numerous features.</p>
<p>This framework has proven reliable in a range of predictive problems, including classification, regression, and anomaly detection, and has found applications in structural analysis (<xref ref-type="bibr" rid="B39">Li et al., 2023</xref>), financial prediction (<xref ref-type="bibr" rid="B73">Wang et al., 2022</xref>), and defect identification (<xref ref-type="bibr" rid="B37">Lao et al., 2023</xref>). LightGBM also supports parallel and distributed computation, enabling seamless scalability in modern computing environments. Its main hyperparameters, such as the number of leaves per tree, the maximum depth of the tree, and the learning rate, play an essential role in determining overall model performance and predictive capacity.</p>
</sec>
<sec>
<label>2.2</label>
<title>Metaheuristics optimization</title>
<p>A persistent and fundamental challenge in machine learning lies in the optimization of hyperparameters, a task widely acknowledged as NP-hard because of its immense combinatorial search space and computational complexity. This difficulty is further reinforced by the NFL theorem (<xref ref-type="bibr" rid="B74">Wolpert and Macready, 1997</xref>), which states that no single optimization approach can consistently outperform all others across all categories of problems, as its effectiveness is inherently tied to the characteristics of the data set, the performance metrics, and the parameter configurations involved.</p>
<p>To mitigate these constraints, increasing attention has been focused toward metaheuristic optimization techniques. Metaheuristics, particularly those inspired by swarm intelligence, constitute a class of stochastic optimization strategies modeled after the collective behaviors observed in natural systems such as flocks of birds, swarms of insects, and herds of animals. These methods are particularly well-suited for solving complex, NP-hard problems because they maintain a dynamic balance between global exploration of the search space and local exploitation of promising regions. Nevertheless, population-based methods often face the drawback of overemphasizing one of these components, which can lead to premature convergence or suboptimal stagnation. To counteract this, hybrid approaches and adaptive mechanisms are frequently employed to preserve equilibrium and enhance the robustness of the search process.</p>
<p>Well-known members of this algorithmic family include particle swarm optimization (PSO) (<xref ref-type="bibr" rid="B34">Kennedy and Eberhart, 1995</xref>), genetic algorithm (GA) (<xref ref-type="bibr" rid="B47">Mirjalili, 2019</xref>), and numerous nature-inspired variants such as the reptile search algorithm (RSA) (<xref ref-type="bibr" rid="B1">Abualigah et al., 2022</xref>), whale optimization algorithm (WOA) (<xref ref-type="bibr" rid="B48">Mirjalili and Lewis, 2016</xref>), red fox algorithm (RFA) (<xref ref-type="bibr" rid="B55">Po&#x00142;ap and Wo&#x0017A;niak, 2021</xref>), sine cosine algorithm (SCA) (<xref ref-type="bibr" rid="B46">Mirjalili, 2016</xref>), artificial bee colony (ABC) (<xref ref-type="bibr" rid="B30">Karaboga and Basturk, 2007</xref>), firefly algorithm (FA) (<xref ref-type="bibr" rid="B77">Yang and He, 2013b</xref>), elk herd optimization (EHO) (<xref ref-type="bibr" rid="B2">Al-Betar et al., 2024</xref>), variable neighborhood search (VNS) (<xref ref-type="bibr" rid="B49">Mladenovi&#x00107; and Hansen, 1997</xref>), and COLSHADE (<xref ref-type="bibr" rid="B25">Gurrola-Ramos et al., 2020</xref>). Together, these methods form a comprehensive and versatile set of tools capable of addressing diverse and computationally intensive optimization problems across scientific and engineering disciplines.</p>
<p>Metaheuristic approaches have shown strong performance in a variety of domains, including software engineering (<xref ref-type="bibr" rid="B71">Villoth J. P. et al., 2025</xref>; <xref ref-type="bibr" rid="B72">Villoth S. J. et al., 2025</xref>), medical diagnostics (<xref ref-type="bibr" rid="B82">Zivkovic et al., 2023</xref>, <xref ref-type="bibr" rid="B81">2024</xref>), and a range of applied optimization scenarios (<xref ref-type="bibr" rid="B7">Bacanin et al., 2024</xref>; <xref ref-type="bibr" rid="B36">Lakicevic et al., 2024</xref>; <xref ref-type="bibr" rid="B3">Antonijevic et al., 2024</xref>; <xref ref-type="bibr" rid="B54">Petrovic et al., 2025</xref>; <xref ref-type="bibr" rid="B11">Bozovic et al., 2025</xref>). However, their use in the healthcare sector, particularly in the modeling of neurodegenerative disorders and the classification of stages of AD using neuroimaging, remains relatively underexplored (<xref ref-type="bibr" rid="B3">Antonijevic et al., 2024</xref>; <xref ref-type="bibr" rid="B19">Dobrojevic et al., 2024</xref>).</p>
<p>Drawing on their proven success in related areas, the integration of metaheuristic algorithms into neurodegenerative disease prediction represents a promising pathway toward enhancing diagnostic precision, model generalization, and individualized clinical evaluation. In this study, a cooperative dual-layer classification framework is proposed, in which a CNN performs hierarchical MRI feature extraction, followed by XGBoost and LightGBM classifiers for refined stage identification. Crucially, metaheuristic optimization is used to tune the hyperparameters in both phases, forming a unified and adaptive strategy that advances an automated and interpretable classification of AD progression.</p></sec>
</sec>
<sec id="s3">
<label>3</label>
<title>Methods</title>
<p>This section begins with an overview of the conventional VNS algorithm. Then it discusses the primary limitations of the original formulation, followed by a detailed explanation of the modified variant developed in this research, and a brief outline of the complete classification framework.</p>
<sec>
<label>3.1</label>
<title>Basic variable neighborhood search algorithm</title>
<p>Local search algorithms in combinatorial optimization improve an initial candidate solution by iteratively exploring its surrounding configurations and replacing it with a better alternative until no further enhancement of the objective function can be obtained. During each iteration, an improved solution <italic>x</italic> is selected from its neighborhood set <italic>N</italic>(<italic>x</italic>), and the search is completed once a local optimal point is reached. Unlike conventional local search methods that follow a single continuous search trajectory, VNS (<xref ref-type="bibr" rid="B49">Mladenovi&#x00107; and Hansen, 1997</xref>) uses a structured diversification principle. Instead of restricting exploration to a single neighborhood, VNS systematically expands the search to progressively more distant neighborhoods, accepting a new solution only when it provides a measurable improvement. This strategy allows the algorithm to retain the beneficial properties of a near-optimal solution while simultaneously investigating unexplored regions of the search space that may yield superior outcomes. Each newly generated candidate solution is subsequently refined through a local search procedure to promote convergence toward a local optimum.</p>
<p>More precisely, VNS operates using a finite collection of neighborhood structures <italic>N</italic><sub><italic>k</italic></sub>, where <italic>k</italic> &#x0003D; 1, 2, &#x02026;, <italic>k</italic><sub><italic>max</italic></sub>. The algorithm transitions between these neighborhoods through three main stages:</p>
<list list-type="bullet">
<list-item><p>A random candidate <italic>x</italic>&#x02032; is generated within the current neighborhood <italic>N</italic><sub><italic>k</italic></sub>, helping to reduce the likelihood of premature convergence and redundant search cycles.</p></list-item>
<list-item><p>Then a local search is applied to <italic>x</italic>&#x02032;, producing an improved solution <italic>x</italic>&#x02033; that is locally optimal with respect to <italic>N</italic><sub><italic>k</italic></sub>.</p></list-item>
<list-item><p>If <italic>x</italic>&#x02033; demonstrates improvement compared to the current best solution, it replaces it, and the exploration continues within the same neighborhood; otherwise, the procedure advances to the next neighborhood structure.</p></list-item>
</list>
<p>The algorithm terminates when the stopping conditions are met, such as when a predefined number of iterations is reached or the computational budget is exhausted.</p>
</sec>
<sec>
<label>3.2</label>
<title>Modified VNS</title>
<p>Original VNS has been widely recognized as a powerful and adaptable modern generation optimization method that exhibits strong performance across a wide spectrum of application areas. However, despite its reliability and versatility, extensive empirical studies utilizing contemporary benchmark suites (<xref ref-type="bibr" rid="B41">Luo et al., 2022</xref>) have identified several limitations, particularly its relatively restricted exploratory capability during the early phases of the optimization process. In addition, the algorithm may occasionally suffer from premature convergence toward local optima, which can negatively impact its overall convergence efficiency under certain conditions.</p>
<p>To overcome these limitations, the first enhancement introduced in this work focuses on increasing population diversity during the initial optimization phase. This improvement is achieved through the integration of the Quasi-Reflexive Learning (QRL) mechanism (<xref ref-type="bibr" rid="B59">Rahnamayan et al., 2007</xref>) into the population initialization procedure. In this extended scheme, the initial population is divided into two complementary subsets: one generated using the standard VNS initialization process, and the other constructed through QRL-based diversification. The latter subset expands the spatial coverage of the search space from the outset, reducing the possibility of early clustering among agents and promoting a more uniform and comprehensive exploration of the solution landscape. The mathematical formulation of this quasi-reflexive generation procedure is given in <xref ref-type="disp-formula" rid="EQ1">Equation 1</xref>, which defines how mirrored solution vectors are produced to supplement their original counterparts.</p>
<disp-formula id="EQ1"><mml:math id="M1"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mrow><mml:msubsup><mml:mi>X</mml:mi><mml:mi>j</mml:mi><mml:mrow><mml:mi>q</mml:mi><mml:mi>r</mml:mi></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mi>r</mml:mi><mml:mi>n</mml:mi><mml:mi>d</mml:mi><mml:mo stretchy='true'>(</mml:mo><mml:mfrac><mml:mrow><mml:mi>l</mml:mi><mml:msub><mml:mi>b</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:mo>+</mml:mo><mml:mi>u</mml:mi><mml:msub><mml:mi>b</mml:mi><mml:mi>j</mml:mi></mml:msub></mml:mrow><mml:mn>2</mml:mn></mml:mfrac><mml:mo>,</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>j</mml:mi></mml:msub><mml:mo stretchy='true'>)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(1)</label></disp-formula>
<p>In this formulation, <inline-formula><mml:math id="M2"><mml:mfrac><mml:mrow><mml:mi>l</mml:mi><mml:msub><mml:mrow><mml:mi>b</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x0002B;</mml:mo><mml:mi>u</mml:mi><mml:msub><mml:mrow><mml:mi>b</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:mfrac></mml:math></inline-formula> denotes the midpoint between the lower and upper boundaries of the <italic>j</italic>-th dimension in the search space, while <italic>rnd</italic>() produces a random value within the specified interval. QRL therefore generates complementary candidate solutions by probabilistically sampling between the midpoint of the search interval and the current solution, consequently enhancing population diversity during early exploration. A detailed mathematical analysis of this mechanism is provided in the original formulation (<xref ref-type="bibr" rid="B59">Rahnamayan et al., 2007</xref>).</p>
<p>The second improvement incorporated into the VNS algorithm introduces a soft rollback mechanism, designed in this research to alleviate convergence stagnation. This mechanism is triggered when the algorithm does not exhibit notable improvement over a defined interval of <italic>T</italic>/3 iterations, where <italic>T</italic> represents the total number of permitted iterations. The value of this threshold was determined empirically. When stagnation occurs, the population is partially reverted to its most recent productive configuration, allowing the algorithm to recover from unproductive search directions. To implement this mechanism, two auxiliary control parameters are introduced: the stagnation counter (<italic>s</italic>_<italic>count</italic>) and the stagnation threshold (<italic>s</italic>_<italic>tresh</italic>), initialized as <italic>s</italic>_<italic>count</italic> &#x0003D; 0 and <italic>s</italic>_<italic>tresh</italic> &#x0003D; <italic>T</italic>/3. The counter increases with every iteration that lacks an improvement in fitness, and when <italic>s</italic>_<italic>count</italic> reaches <italic>s</italic>_<italic>tresh</italic>, the rollback process begins.</p>
<p>This rollback strategy integrates an elitist preservation principle to safeguard the overall quality of solutions. Specifically, the best-performing individual, defined as the candidate who reaches the optimal fitness value, is retained, while the remaining members of the population are regenerated according to the original initialization procedure of the algorithm. This approach effectively restores population diversity without sacrificing the most promising solution identified so far.</p>
<p>To reflect these algorithmic refinements, the proposed variant is named the quasi-reflexive learning stagnation-aware VNS (QSAVNS). The complete step-by-step procedure of this modified method is presented in <xref ref-type="other" rid="algorithm_1">Algorithm 1</xref>.</p>
<statement content-type="algorithm" id="algorithm_1">
<label>Algorithm 1</label>
<title>QSAVNS procedural logic.</title>
<p>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fncom-20-1731812-i001.tif"/>
</p>
</statement>
<p>Because elapsed runtime depends heavily on hardware and implementation specifics, the algorithmic complexity of metaheuristics is typically assessed in terms of fitness function evaluations, which is the standard and more reliable measure in metaheuristic optimization research. In each run, the fitness function evaluation (FFE), corresponding to model training and validation for one hyperparameter configuration, is the most computationally expensive operation and therefore dominates the overall runtime of the algorithm. From a computational standpoint, QSAVNS preserves the same fitness-evaluation complexity as baseline VNS. The QRL-based initialization and stagnation-aware rollback only alter solution generation/diversification, while maintaining <italic>N</italic> fitness evaluations within each of <italic>T</italic> iterations. Consequently, the overall complexity remains <italic>O</italic>(<italic>N</italic>&#x000D7;<italic>T</italic>) in FFEs, which is identical to baseline VNS.</p>
</sec>
<sec>
<label>3.3</label>
<title>Proposed framework</title>
<p>The proposed method operates as the core optimization engine within a two-layer classification architecture. In this design, the classifiers&#x00027; hyperparameters are represented as agent-specific variables, and optimization is carried out iteratively through repeated cycles of model training, parameter adjustment, and performance evaluation until a predefined convergence criterion is satisfied.</p>
<p>At the first level (L1), this iterative optimization is applied to CNNs. Once the most suitable CNN configuration is identified, its final output layer is removed, and the intermediate feature embeddings learned during training are extracted. These representations are subsequently passed to the second level (L2), where ensemble boosting classifiers are employed. In this second stage, the boosting models also undergo metaheuristic optimization, with their hyperparameters encoded as evolutionary traits of the agents within the population.</p></sec>
</sec>
<sec id="s4">
<label>4</label>
<title>Experimental setup</title>
<sec>
<label>4.1</label>
<title>Dataset overview</title>
<p>For this research, a dataset was used from the Kaggle platform.<xref ref-type="fn" rid="fn0004"><sup>2</sup></xref> The dataset was reduced to 10% of its original volume while preserving proportional representation between all classes to maintain balance. It is intended for the classification of AD stages and contains four distinct categories: No Dementia (class 0), Very Mild Dementia (class 1), Mild Dementia (class 2), and Moderate Dementia (class 3). The original dataset was already partitioned into training and testing subsets by class and was utilized in this study in its existing form, without any additional preprocessing or modification.</p>
</sec>
<sec>
<label>4.2</label>
<title>Evaluation metrics</title>
<p>During the simulation phase, the performance of the model was assessed using a standard set of classification metrics, namely accuracy, precision, recall, and the F1-score, formally defined in <xref ref-type="disp-formula" rid="EQ2">Equations 2</xref>&#x02013;<xref ref-type="disp-formula" rid="EQ5">5</xref>.</p>
<disp-formula id="EQ2"><mml:math id="M6"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>A</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mi>u</mml:mi><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>y</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>T</mml:mi><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>F</mml:mi><mml:mi>P</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>T</mml:mi><mml:mi>N</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>F</mml:mi><mml:mi>N</mml:mi></mml:mrow></mml:mfrac></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(2)</label></disp-formula>
<disp-formula id="EQ3"><mml:math id="M7"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>F</mml:mi><mml:mi>P</mml:mi></mml:mrow></mml:mfrac></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(3)</label></disp-formula>
<disp-formula id="EQ4"><mml:math id="M8"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>F</mml:mi><mml:mi>N</mml:mi></mml:mrow></mml:mfrac></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(4)</label></disp-formula>
<disp-formula id="EQ5"><mml:math id="M9"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>F</mml:mi><mml:mn>1</mml:mn><mml:mtext>_</mml:mtext><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>2</mml:mn><mml:mo>&#x000B7;</mml:mo><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo>&#x000B7;</mml:mo><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi></mml:mrow><mml:mrow><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:mfrac></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(5)</label></disp-formula>
<p>In the above equations, <italic>TP</italic>, <italic>TN</italic>, <italic>FP</italic>, and <italic>FN</italic> denote the numbers of true positives, true negatives, false positives, and false negatives, respectively.</p>
<p>In addition to conventional classification metrics, the Matthews correlation coefficient (MCC) (<xref ref-type="bibr" rid="B44">Matthews, 1975</xref>) was used as an additional evaluation measure. Due to its robustness in handling imbalanced class distributions, the MCC offers a more comprehensive and reliable indication of the overall performance of the model. Its calculation is formally defined in <xref ref-type="disp-formula" rid="EQ6">Equation 6</xref>.</p>
<disp-formula id="EQ6"><mml:math id="M10"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mtext class="textrm" mathvariant="normal">MCC</mml:mtext><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>&#x000D7;</mml:mo><mml:mi>T</mml:mi><mml:mi>N</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>F</mml:mi><mml:mi>P</mml:mi><mml:mo>&#x000D7;</mml:mo><mml:mi>F</mml:mi><mml:mi>N</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:msqrt><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>F</mml:mi><mml:mi>P</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>F</mml:mi><mml:mi>N</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>T</mml:mi><mml:mi>N</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>F</mml:mi><mml:mi>P</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>T</mml:mi><mml:mi>N</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>F</mml:mi><mml:mi>N</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msqrt></mml:mrow></mml:mfrac></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(6)</label></disp-formula>
<p>Across all simulation experiments, the MCC was designated as the main optimization objective with the aim of maximizing its value to obtain the most balanced and accurate classification results.</p>
</sec>
<sec>
<label>4.3</label>
<title>Experimental setup</title>
<p>A series of three experimental studies was conducted in which metaheuristic optimization algorithms were employed to fine-tune the parameters across both layers of the proposed dual-stage classification framework. The architecture consisted of a CNN as the first-layer module, followed by XGBoost and LightGBM classifiers that form the second-layer classification component. In each experimental scenario, the proposed QSAVNS metaheuristic served as the principal optimization algorithm, and its performance was systematically compared with several well-established optimization methods. The comparison group included the canonical VNS (<xref ref-type="bibr" rid="B49">Mladenovi&#x00107; and Hansen, 1997</xref>), GA (<xref ref-type="bibr" rid="B47">Mirjalili, 2019</xref>), PSO (<xref ref-type="bibr" rid="B34">Kennedy and Eberhart, 1995</xref>), ABC (<xref ref-type="bibr" rid="B30">Karaboga and Basturk, 2007</xref>), BA (<xref ref-type="bibr" rid="B76">Yang and He, 2013a</xref>), SCHO (<xref ref-type="bibr" rid="B8">Bai et al., 2023</xref>), and EHO (<xref ref-type="bibr" rid="B2">Al-Betar et al., 2024</xref>), providing a representative balance between the classical and more recent optimization paradigms. All competing algorithms were executed using their standard parameter configurations as specified in the original studies. To maintain methodological consistency, identical experimental conditions were applied to each algorithm in all three experiments. To minimize the effect of stochastic variability inherent in metaheuristic processes, each method was independently executed 30 times.</p>
<p>Given the high computational cost of CNN training, the first-layer (L1) experiments used a reduced population of eight candidate solutions (<italic>N</italic> &#x0003D; 8) and a maximum of five iterations per run (<italic>max</italic>_<italic>iter</italic> &#x0003D; 5). For the optimization of XGBoost and LightGBM, the population size was set to ten (<italic>N</italic> &#x0003D; 10), with ten iterations per execution (<italic>max</italic>_<italic>iter</italic> &#x0003D; 10). Within the metaheuristic optimization procedure, each individual in the population encodes a unique configuration of a neural network or ensemble model (CNN, XGBoost, or LightGBM) along with its associated hyperparameters. Evaluating each configuration requires multiple training-validation cycles, which are computationally demanding. To alleviate this burden, the size of the population and the iteration count were deliberately constrained, thus reducing the total number of retraining operations. Furthermore, once the population size exceeds a certain threshold, additional expansion typically produces negligible improvements in optimization performance. Empirical evidence suggests that metaheuristic algorithms can often converge to near-optimal solutions even with moderate population sizes, providing an efficient and resource-conscious approach to solving high-cost optimization tasks. As previously stated, the optimization objective was defined as the maximization of the MCC.</p>
<p>In the first experimental setup, metaheuristic algorithms were applied to optimize the CNN component in the initial layer (L1) of the proposed framework. The tunable CNN hyperparameters, listed in <xref ref-type="table" rid="T1">Table 1</xref>, were intentionally limited to lightweight configurations to allow potential deployment on resource-constrained platforms such as ESP32 or Arduino. A batch size of 512 was used, and early stopping was triggered after one-third of the total training epochs. The target image resolution was fixed at (32, 32) using the RGB color mode and categorical label encoding. Within the cooperative dual-layer design, the optimized CNN from this stage provided the feature extraction foundation for the subsequent ensemble-based classification phase.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Model configurations with corresponding optimized hyperparameters and their respective search ranges.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Model</bold></th>
<th valign="top" align="left"><bold>Hyperparameter</bold></th>
<th valign="top" align="center"><bold>Low limit</bold></th>
<th valign="top" align="center"><bold>High limit</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" rowspan="6">L1 CNN</td>
<td valign="top" align="left">Learning rate</td>
<td valign="top" align="center">0.0001</td>
<td valign="top" align="center">0.003</td>
</tr>
<tr>
<td valign="top" align="left">Dropout</td>
<td valign="top" align="center">0.05</td>
<td valign="top" align="center">0.2</td>
</tr>
<tr>
<td valign="top" align="left">Epochs</td>
<td valign="top" align="center">10</td>
<td valign="top" align="center">30</td>
</tr>
<tr>
<td valign="top" align="left">Convolutional layers</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">2</td>
</tr>
<tr>
<td valign="top" align="left">Dense layers</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">2</td>
</tr>
<tr>
<td valign="top" align="left">Cells per layer</td>
<td valign="top" align="center">32</td>
<td valign="top" align="center">96</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="6">L2 XGBoost</td>
<td valign="top" align="left">Learning rate</td>
<td valign="top" align="center">0.1</td>
<td valign="top" align="center">0.9</td>
</tr>
<tr>
<td valign="top" align="left">Minimum child weight</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">5</td>
</tr>
<tr>
<td valign="top" align="left">Subsample</td>
<td valign="top" align="center">0.01</td>
<td valign="top" align="center">1</td>
</tr>
<tr>
<td valign="top" align="left">Col sample by tree</td>
<td valign="top" align="center">0.01</td>
<td valign="top" align="center">1</td>
</tr>
<tr>
<td valign="top" align="left">Max depth</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">5</td>
</tr>
<tr>
<td valign="top" align="left">Gamma</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.8</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="10">L2 LightGBM</td>
<td valign="top" align="left">Number of rounds</td>
<td valign="top" align="center">5</td>
<td valign="top" align="center">20</td>
</tr>
<tr>
<td valign="top" align="left">Max depth</td>
<td valign="top" align="center">3</td>
<td valign="top" align="center">10</td>
</tr>
<tr>
<td valign="top" align="left">Number of leaves</td>
<td valign="top" align="center">3</td>
<td valign="top" align="center">10</td>
</tr>
<tr>
<td valign="top" align="left">Minimum child weight</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">5</td>
</tr>
<tr>
<td valign="top" align="left">Feature fraction</td>
<td valign="top" align="center">0.1</td>
<td valign="top" align="center">0.9</td>
</tr>
<tr>
<td valign="top" align="left">Bagging fraction</td>
<td valign="top" align="center">0.5</td>
<td valign="top" align="center">1</td>
</tr>
<tr>
<td valign="top" align="left">Min split gain</td>
<td valign="top" align="center">0.001</td>
<td valign="top" align="center">0.1</td>
</tr>
<tr>
<td valign="top" align="left">Lambda l1</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">5</td>
</tr>
<tr>
<td valign="top" align="left">Lambda l2</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">3</td>
</tr>
<tr>
<td valign="top" align="left">Learning rate</td>
<td valign="top" align="center">0.01</td>
<td valign="top" align="center">0.9</td>
</tr></tbody>
</table>
</table-wrap>
<p>In the second experimental configuration, the XGBoost algorithm was applied within the classification layer (L2) of the framework. For this purpose, the intermediate feature embeddings generated by the optimized CNN from the first layer were extracted from the output of the dropout layer and saved for all data samples. Then these characteristic vectors were split into training and testing subsets following a 70%&#x02013;30% ratio. The resulting feature representation was used as input for both the training and hyperparameter optimization of the XGBoost classifier. The specific parameters selected for tuning, along with their search intervals, are summarized in <xref ref-type="table" rid="T1">Table 1</xref>.</p>
<p>In the third experimental study, the LightGBM algorithm was integrated into the second layer (L2) of the proposed architecture. The classifier was trained and optimized using the same intermediate feature representations produced by the CNN in the preceding experiment. The LightGBM hyperparameters chosen for optimization, along with their defined search ranges, are also presented in <xref ref-type="table" rid="T1">Table 1</xref>.</p></sec>
</sec>
<sec id="s5">
<label>5</label>
<title>Simulation results</title>
<p>The experimental analyses concentrated on the integration of CNNs within the first layer (L1) of the proposed framework, where they handled the initial processing and extraction of discriminative features from MRI images corresponding to different stages of AD. In the subsequent layer (L2), gradient boosting classifiers were used to perform the final stage classification. At this level, two competitive boosting models, XGBoost and LightGBM, were utilized, both exhibiting strong and consistently stable performance throughout the evaluation process.</p>
<sec>
<label>5.1</label>
<title>L1 CNN</title>
<p><xref ref-type="table" rid="T2">Table 2</xref> presents a comparative analysis of CNN models optimized using several metaheuristic algorithms, with the MCC serving as the primary objective metric. Among the evaluated methods, the proposed QSAVNS optimizer achieved the best overall result, attaining a maximum MCC of 0.287398. In comparison, the strongest worst-case performance was obtained by SCHO (0.239755), which also produced the highest mean MCC (0.251198) and the best median value (0.249601). Furthermore, it is worth noting that within this experimental configuration, the ABC algorithm exhibited the most consistent behavior, as evidenced by its minimal variance across multiple independent runs.</p>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Results of the objective and indicator functions obtained during L1 CNN optimization.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Method</bold></th>
<th valign="top" align="center"><bold>Best</bold></th>
<th valign="top" align="center"><bold>Worst</bold></th>
<th valign="top" align="center"><bold>Mean</bold></th>
<th valign="top" align="center"><bold>Median</bold></th>
<th valign="top" align="center"><bold>Std</bold></th>
<th valign="top" align="center"><bold>Var</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">CNN-QSAVNS</td>
<td valign="top" align="center">0.287398</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.156415</td>
<td valign="top" align="center">0.169131</td>
<td valign="top" align="center">0.129712</td>
<td valign="top" align="center">0.016825</td>
</tr>
<tr>
<td valign="top" align="left">CNN-VNS</td>
<td valign="top" align="center">0.229776</td>
<td valign="top" align="center">0.080976</td>
<td valign="top" align="center">0.154687</td>
<td valign="top" align="center">0.153998</td>
<td valign="top" align="center">0.060673</td>
<td valign="top" align="center">0.003681</td>
</tr>
<tr>
<td valign="top" align="left">CNN-GA</td>
<td valign="top" align="center">0.026130</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.006532</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.011314</td>
<td valign="top" align="center">0.000128</td>
</tr>
<tr>
<td valign="top" align="left">CNN-PSO</td>
<td valign="top" align="center">0.255399</td>
<td valign="top" align="center">0.212239</td>
<td valign="top" align="center">0.234623</td>
<td valign="top" align="center">0.235426</td>
<td valign="top" align="center">0.019539</td>
<td valign="top" align="center">0.000382</td>
</tr>
<tr>
<td valign="top" align="left">CNN-ABC</td>
<td valign="top" align="center">0.244962</td>
<td valign="top" align="center">0.228073</td>
<td valign="top" align="center">0.237688</td>
<td valign="top" align="center">0.238860</td>
<td valign="top" align="center">0.007073</td>
<td valign="top" align="center">5.00E-05</td>
</tr>
<tr>
<td valign="top" align="left">CNN-BA</td>
<td valign="top" align="center">0.248187</td>
<td valign="top" align="center">0.158546</td>
<td valign="top" align="center">0.208912</td>
<td valign="top" align="center">0.214457</td>
<td valign="top" align="center">0.032671</td>
<td valign="top" align="center">0.001067</td>
</tr>
<tr>
<td valign="top" align="left">CNN-SCHO</td>
<td valign="top" align="center">0.265833</td>
<td valign="top" align="center">0.239755</td>
<td valign="top" align="center">0.251198</td>
<td valign="top" align="center">0.249601</td>
<td valign="top" align="center">0.011586</td>
<td valign="top" align="center">0.000134</td>
</tr>
<tr>
<td valign="top" align="left">CNN-EHO</td>
<td valign="top" align="center">0.054054</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.018991</td>
<td valign="top" align="center">0.010954</td>
<td valign="top" align="center">0.022132</td>
<td valign="top" align="center">0.000490</td>
</tr>
<tr>
<td valign="top" align="left" colspan="7"><bold>Error rate</bold></td>
</tr>
<tr>
<td valign="top" align="left">CNN-QSAVNS</td>
<td valign="top" align="center">0.554545</td>
<td valign="top" align="center">0.772727</td>
<td valign="top" align="center">0.645833</td>
<td valign="top" align="center">0.628030</td>
<td valign="top" align="center">0.092692</td>
<td valign="top" align="center">0.008592</td>
</tr>
<tr>
<td valign="top" align="left">CNN-VNS</td>
<td valign="top" align="center">0.611364</td>
<td valign="top" align="center">0.756818</td>
<td valign="top" align="center">0.689583</td>
<td valign="top" align="center">0.695076</td>
<td valign="top" align="center">0.053358</td>
<td valign="top" align="center">0.002847</td>
</tr>
<tr>
<td valign="top" align="left">CNN-GA</td>
<td valign="top" align="center">0.770455</td>
<td valign="top" align="center">0.772727</td>
<td valign="top" align="center">0.772159</td>
<td valign="top" align="center">0.772727</td>
<td valign="top" align="center">0.000984</td>
<td valign="top" align="center">9.68E-07</td>
</tr>
<tr>
<td valign="top" align="left">CNN-PSO</td>
<td valign="top" align="center">0.569697</td>
<td valign="top" align="center">0.674242</td>
<td valign="top" align="center">0.626326</td>
<td valign="top" align="center">0.630682</td>
<td valign="top" align="center">0.045389</td>
<td valign="top" align="center">0.002060</td>
</tr>
<tr>
<td valign="top" align="left">CNN-ABC</td>
<td valign="top" align="center">0.597727</td>
<td valign="top" align="center">0.636364</td>
<td valign="top" align="center">0.616856</td>
<td valign="top" align="center">0.616667</td>
<td valign="top" align="center">0.014167</td>
<td valign="top" align="center">0.000201</td>
</tr>
<tr>
<td valign="top" align="left">CNN-BA</td>
<td valign="top" align="center">0.600000</td>
<td valign="top" align="center">0.706061</td>
<td valign="top" align="center">0.659280</td>
<td valign="top" align="center">0.665530</td>
<td valign="top" align="center">0.039570</td>
<td valign="top" align="center">0.001566</td>
</tr>
<tr>
<td valign="top" align="left">CNN-SCHO</td>
<td valign="top" align="center">0.568939</td>
<td valign="top" align="center">0.589394</td>
<td valign="top" align="center">0.586742</td>
<td valign="top" align="center">0.585606</td>
<td valign="top" align="center">0.013705</td>
<td valign="top" align="center">0.000188</td>
</tr>
<tr>
<td valign="top" align="left">CNN-EHO</td>
<td valign="top" align="center">0.752273</td>
<td valign="top" align="center">0.772727</td>
<td valign="top" align="center">0.767424</td>
<td valign="top" align="center">0.772348</td>
<td valign="top" align="center">0.008753</td>
<td valign="top" align="center">7.66E-05</td>
</tr></tbody>
</table>
</table-wrap>
<p><xref ref-type="table" rid="T2">Table 2</xref> also reports the results of the indicator function expressed in terms of the error rate for CNN models optimized by the same set of metaheuristic techniques. QSAVNS achieved the lowest absolute error rate of 0.554545, while SCHO recorded the best mean error rate (0.586742), median error rate (0.585606), and the most favorable worst-case result (0.589394). Although GA did not achieve the top-performing absolute score, it demonstrated notable stability across repeated executions, indicating high robustness despite slightly lower overall optimization effectiveness.</p>
<p><xref ref-type="table" rid="T3">Table 3</xref> provides a comprehensive overview of the evaluation metrics corresponding to the CNN classifiers that achieved the best performance under different metaheuristic optimization methods. The findings show that the proposed QSAVNS algorithm generated a robust CNN model, reaching an overall classification accuracy of 0.445455, accompanied by consistently solid precision, recall (sensitivity), and F1-scores in most categories. Nevertheless, a clear pattern emerged in all the models, each showing a limited ability to accurately differentiate among the four stages of AD. This limitation underscores the need for additional methodological refinements, which are explored in the following sections of this study.</p>
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p>Comprehensive assessment of the best-performing CNN models yielded by the optimization process.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Approach</bold></th>
<th valign="top" align="left"><bold>Metric</bold></th>
<th valign="top" align="center"><bold>0</bold></th>
<th valign="top" align="center"><bold>1</bold></th>
<th valign="top" align="center"><bold>2</bold></th>
<th valign="top" align="center"><bold>3</bold></th>
<th valign="top" align="center"><bold>Accuracy</bold></th>
<th valign="top" align="center"><bold>Macro ave</bold></th>
<th valign="top" align="center"><bold>Weight ave</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" rowspan="3">CNN-QSAVNS</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.424501</td>
<td valign="top" align="center">0.4</td>
<td valign="top" align="center">0.5</td>
<td valign="top" align="center">0.470480</td>
<td valign="top" align="center">0.445455</td>
<td valign="top" align="center">0.448745</td>
<td valign="top" align="center">0.445873</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.776042</td>
<td valign="top" align="center">0.035714</td>
<td valign="top" align="center">0.076667</td>
<td valign="top" align="center">0.85</td>
<td valign="top" align="center">0.445455</td>
<td valign="top" align="center">0.434606</td>
<td valign="top" align="center">0.445455</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.548803</td>
<td valign="top" align="center">0.065574</td>
<td valign="top" align="center">0.132948</td>
<td valign="top" align="center">0.605701</td>
<td valign="top" align="center">0.445455</td>
<td valign="top" align="center">0.338256</td>
<td valign="top" align="center">0.344218</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-VNS</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.490196</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.324691</td>
<td valign="top" align="center">0.388636</td>
<td valign="top" align="center">0.203722</td>
<td valign="top" align="center">0.216396</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.651042</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.876667</td>
<td valign="top" align="center">0.388636</td>
<td valign="top" align="center">0.381927</td>
<td valign="top" align="center">0.388636</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.559284</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.473874</td>
<td valign="top" align="center">0.388636</td>
<td valign="top" align="center">0.258289</td>
<td valign="top" align="center">0.270399</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-GA</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.5</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.228311</td>
<td valign="top" align="center">0.229545</td>
<td valign="top" align="center">0.182078</td>
<td valign="top" align="center">0.179161</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.008929</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">0.229545</td>
<td valign="top" align="center">0.252232</td>
<td valign="top" align="center">0.229545</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.017544</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.371747</td>
<td valign="top" align="center">0.229545</td>
<td valign="top" align="center">0.097323</td>
<td valign="top" align="center">0.088954</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-PSO</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.425041</td>
<td valign="top" align="center">0.317073</td>
<td valign="top" align="center">0.446043</td>
<td valign="top" align="center">0.440901</td>
<td valign="top" align="center">0.430303</td>
<td valign="top" align="center">0.407265</td>
<td valign="top" align="center">0.405936</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.671875</td>
<td valign="top" align="center">0.038690</td>
<td valign="top" align="center">0.206667</td>
<td valign="top" align="center">0.783333</td>
<td valign="top" align="center">0.430303</td>
<td valign="top" align="center">0.425141</td>
<td valign="top" align="center">0.430303</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.520686</td>
<td valign="top" align="center">0.068966</td>
<td valign="top" align="center">0.282460</td>
<td valign="top" align="center">0.564226</td>
<td valign="top" align="center">0.430303</td>
<td valign="top" align="center">0.359084</td>
<td valign="top" align="center">0.361456</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-ABC</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.762887</td>
<td valign="top" align="center">0.457831</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.281879</td>
<td valign="top" align="center">0.363636</td>
<td valign="top" align="center">0.375649</td>
<td valign="top" align="center">0.402533</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.385417</td>
<td valign="top" align="center">0.113095</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.98</td>
<td valign="top" align="center">0.363636</td>
<td valign="top" align="center">0.369628</td>
<td valign="top" align="center">0.363636</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.512111</td>
<td valign="top" align="center">0.181384</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.437826</td>
<td valign="top" align="center">0.363636</td>
<td valign="top" align="center">0.282830</td>
<td valign="top" align="center">0.294654</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-BA</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.498054</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.337469</td>
<td valign="top" align="center">0.4</td>
<td valign="top" align="center">0.208881</td>
<td valign="top" align="center">0.221586</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.666667</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.906667</td>
<td valign="top" align="center">0.4</td>
<td valign="top" align="center">0.393333</td>
<td valign="top" align="center">0.4</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.570156</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.491863</td>
<td valign="top" align="center">0.4</td>
<td valign="top" align="center">0.265505</td>
<td valign="top" align="center">0.277650</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-SCHO</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.528708</td>
<td valign="top" align="center">0.406780</td>
<td valign="top" align="center">0.381720</td>
<td valign="top" align="center">0.385084</td>
<td valign="top" align="center">0.431061</td>
<td valign="top" align="center">0.425573</td>
<td valign="top" align="center">0.431624</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.575521</td>
<td valign="top" align="center">0.071429</td>
<td valign="top" align="center">0.236667</td>
<td valign="top" align="center">0.843333</td>
<td valign="top" align="center">0.431061</td>
<td valign="top" align="center">0.431737</td>
<td valign="top" align="center">0.431061</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.551122</td>
<td valign="top" align="center">0.121519</td>
<td valign="top" align="center">0.292181</td>
<td valign="top" align="center">0.528736</td>
<td valign="top" align="center">0.431061</td>
<td valign="top" align="center">0.373389</td>
<td valign="top" align="center">0.377831</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-EHO</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.326613</td>
<td valign="top" align="center">0.327869</td>
<td valign="top" align="center">0.274510</td>
<td valign="top" align="center">0.047619</td>
<td valign="top" align="center">0.278788</td>
<td valign="top" align="center">0.244153</td>
<td valign="top" align="center">0.251683</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.632813</td>
<td valign="top" align="center">0.178571</td>
<td valign="top" align="center">0.186667</td>
<td valign="top" align="center">0.03</td>
<td valign="top" align="center">0.278788</td>
<td valign="top" align="center">0.257013</td>
<td valign="top" align="center">0.278788</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.430851</td>
<td valign="top" align="center">0.231214</td>
<td valign="top" align="center">0.222222</td>
<td valign="top" align="center">0.036810</td>
<td valign="top" align="center">0.278788</td>
<td valign="top" align="center">0.230274</td>
<td valign="top" align="center">0.243064</td>
</tr>
<tr>
<td/>
<td valign="top" align="left">Samples</td>
<td valign="top" align="center">384</td>
<td valign="top" align="center">336</td>
<td valign="top" align="center">300</td>
<td valign="top" align="center">300</td>
<td/>
<td/>
<td/>
</tr></tbody>
</table>
</table-wrap>
<p><xref ref-type="fig" rid="F1">Figure 1</xref> shows the architecture of the L1 CNN model with the best performance, along with its truncated counterpart. The left diagram shows the full CNN architecture used for end-to-end training and L1 optimization. The right diagram illustrates the truncated CNN obtained by removing the final classification layers. This model outputs intermediate feature embeddings that are subsequently used as input for the L2 ensemble classifiers.</p>
<fig position="float" id="F1">
<label>Figure 1</label>
<caption><p>Optimized CNN architecture <bold>(left)</bold> and its truncated version <bold>(right)</bold>, where the final classification layers are removed to extract intermediate feature embeddings for L2 ensemble classification.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fncom-20-1731812-g0001.tif">
<alt-text content-type="machine-generated">Diagram showing a neural network architecture with two similar branches. Each branch consists of an InputLayer, two Conv2D and MaxPooling2D layers, followed by a Flatten layer. Next, there are two Dense layers interspersed with a Dropout layer. Each step shows input and output shapes. The final output is from an Activation layer.</alt-text>
</graphic>
</fig>
</sec>
<sec>
<label>5.2</label>
<title>L2 XGBoost</title>
<p><xref ref-type="table" rid="T4">Table 4</xref> presents a comparative evaluation of the XGBoost second-layer classifiers optimized using different metaheuristic algorithms, with the MCC serving as the primary evaluation metric. The proposed QSAVNS optimizer achieved the highest best-case result, reaching a peak MCC of 0.812047, while also demonstrating outstanding stability across other evaluation measures by recording the best mean (0.796531) and median (0.797621) values. These results highlight the robustness and reliability of QSAVNS as an optimization approach. Furthermore, QSAVNS achieved the strongest worst-case result (0.769870), while GA exhibited the lowest variability across repeated executions, indicating strong consistency in its optimization performance.</p>
<table-wrap position="float" id="T4">
<label>Table 4</label>
<caption><p>Results of the objective and indicator function evaluations for L2 XGBoost optimization.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Method</bold></th>
<th valign="top" align="center"><bold>Best</bold></th>
<th valign="top" align="center"><bold>Worst</bold></th>
<th valign="top" align="center"><bold>Mean</bold></th>
<th valign="top" align="center"><bold>Median</bold></th>
<th valign="top" align="center"><bold>Std</bold></th>
<th valign="top" align="center"><bold>Var</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">CNN-XGB-QSAVNS</td>
<td valign="top" align="center">0.812047</td>
<td valign="top" align="center">0.769870</td>
<td valign="top" align="center">0.796531</td>
<td valign="top" align="center">0.797621</td>
<td valign="top" align="center">0.010066</td>
<td valign="top" align="center">0.000101</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-VNS</td>
<td valign="top" align="center">0.800002</td>
<td valign="top" align="center">0.718421</td>
<td valign="top" align="center">0.769789</td>
<td valign="top" align="center">0.775052</td>
<td valign="top" align="center">0.021384</td>
<td valign="top" align="center">0.000457</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-GA</td>
<td valign="top" align="center">0.800601</td>
<td valign="top" align="center">0.764087</td>
<td valign="top" align="center">0.784403</td>
<td valign="top" align="center">0.783531</td>
<td valign="top" align="center">0.009334</td>
<td valign="top" align="center">8.71E-05</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-PSO</td>
<td valign="top" align="center">0.799625</td>
<td valign="top" align="center">0.734070</td>
<td valign="top" align="center">0.779291</td>
<td valign="top" align="center">0.783115</td>
<td valign="top" align="center">0.015243</td>
<td valign="top" align="center">0.000232</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-ABC</td>
<td valign="top" align="center">0.771431</td>
<td valign="top" align="center">0.650755</td>
<td valign="top" align="center">0.720854</td>
<td valign="top" align="center">0.729989</td>
<td valign="top" align="center">0.034184</td>
<td valign="top" align="center">0.001169</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-BA</td>
<td valign="top" align="center">0.804570</td>
<td valign="top" align="center">0.752267</td>
<td valign="top" align="center">0.779033</td>
<td valign="top" align="center">0.780635</td>
<td valign="top" align="center">0.014211</td>
<td valign="top" align="center">0.000202</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-SCHO</td>
<td valign="top" align="center">0.799465</td>
<td valign="top" align="center">0.743217</td>
<td valign="top" align="center">0.779011</td>
<td valign="top" align="center">0.782598</td>
<td valign="top" align="center">0.013448</td>
<td valign="top" align="center">0.000181</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-EHO</td>
<td valign="top" align="center">0.800585</td>
<td valign="top" align="center">0.744689</td>
<td valign="top" align="center">0.780002</td>
<td valign="top" align="center">0.786002</td>
<td valign="top" align="center">0.018699</td>
<td valign="top" align="center">0.000350</td>
</tr>
<tr>
<td valign="top" align="left" colspan="7"><bold>Error rate</bold></td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-QSAVNS</td>
<td valign="top" align="center">0.140909</td>
<td valign="top" align="center">0.172727</td>
<td valign="top" align="center">0.152475</td>
<td valign="top" align="center">0.151515</td>
<td valign="top" align="center">0.007588</td>
<td valign="top" align="center">5.76E-05</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-VNS</td>
<td valign="top" align="center">0.15</td>
<td valign="top" align="center">0.210606</td>
<td valign="top" align="center">0.172424</td>
<td valign="top" align="center">0.168182</td>
<td valign="top" align="center">0.015967</td>
<td valign="top" align="center">0.000255</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-GA</td>
<td valign="top" align="center">0.149242</td>
<td valign="top" align="center">0.176515</td>
<td valign="top" align="center">0.161515</td>
<td valign="top" align="center">0.162121</td>
<td valign="top" align="center">0.006966</td>
<td valign="top" align="center">4.85E-05</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-PSO</td>
<td valign="top" align="center">0.15</td>
<td valign="top" align="center">0.199242</td>
<td valign="top" align="center">0.165354</td>
<td valign="top" align="center">0.162121</td>
<td valign="top" align="center">0.011394</td>
<td valign="top" align="center">0.000130</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-ABC</td>
<td valign="top" align="center">0.171212</td>
<td valign="top" align="center">0.262121</td>
<td valign="top" align="center">0.209091</td>
<td valign="top" align="center">0.202273</td>
<td valign="top" align="center">0.025598</td>
<td valign="top" align="center">0.000655</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-BA</td>
<td valign="top" align="center">0.146212</td>
<td valign="top" align="center">0.185606</td>
<td valign="top" align="center">0.165505</td>
<td valign="top" align="center">0.164394</td>
<td valign="top" align="center">0.010642</td>
<td valign="top" align="center">0.000113</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-SCHO</td>
<td valign="top" align="center">0.15</td>
<td valign="top" align="center">0.192424</td>
<td valign="top" align="center">0.165455</td>
<td valign="top" align="center">0.162879</td>
<td valign="top" align="center">0.010110</td>
<td valign="top" align="center">0.000102</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-EHO</td>
<td valign="top" align="center">0.149242</td>
<td valign="top" align="center">0.191667</td>
<td valign="top" align="center">0.164949</td>
<td valign="top" align="center">0.160606</td>
<td valign="top" align="center">0.014142</td>
<td valign="top" align="center">0.000200</td>
</tr></tbody>
</table>
</table-wrap>
<p><xref ref-type="table" rid="T4">Table 4</xref> also includes a comparative analysis based on the error rate indicator for the same XGBoost classifiers optimized with different metaheuristic methods. The most favorable result, corresponding to the lowest best-case error rate of 0.140909, was achieved by the proposed QSAVNS. In addition, QSAVNS outperformed competing algorithms by achieving the best mean and median error rates, measured at 0.152475 and 0.151515, respectively, confirming its high stability between independent runs. It also obtained the lowest worst-case error rate (0.172727) and demonstrated excellent consistency (second only to GA) in this evaluation scenario.</p>
<p><xref ref-type="table" rid="T5">Table 5</xref> provides detailed evaluation metrics for the top-performing L2 XGBoost classifiers optimized with different metaheuristic algorithms. Among them, the proposed QSAVNS achieved the best overall performance, yielding the most accurate CNN-XGBoost-based model with a maximum classification accuracy of 0.859091, while maintaining consistently high precision, recall (sensitivity), and F1-scores across all classes. An important observation from these results is that integrating XGBoost into the second layer of the framework significantly enhanced overall precision compared to standalone CNN models, notably improving the differentiation between AD stages. Nevertheless, as shown in the next subsection, the XGBoost classifiers were significantly outperformed by the LightGBM models implemented in the same layer.</p>
<table-wrap position="float" id="T5">
<label>Table 5</label>
<caption><p>Comprehensive assessment of the best-performing L2 XGBoost models produced through the optimization process.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Approach</bold></th>
<th valign="top" align="left"><bold>Metric</bold></th>
<th valign="top" align="center"><bold>0</bold></th>
<th valign="top" align="center"><bold>1</bold></th>
<th valign="top" align="center"><bold>2</bold></th>
<th valign="top" align="center"><bold>3</bold></th>
<th valign="top" align="center"><bold>Accuracy</bold></th>
<th valign="top" align="center"><bold>Macro ave</bold></th>
<th valign="top" align="center"><bold>Weight ave</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" rowspan="3">CNN-XGB-QSAVNS</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.844059</td>
<td valign="top" align="center">0.852349</td>
<td valign="top" align="center">0.824503</td>
<td valign="top" align="center">0.917722</td>
<td valign="top" align="center">0.859091</td>
<td valign="top" align="center">0.859658</td>
<td valign="top" align="center">0.858466</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.888021</td>
<td valign="top" align="center">0.755952</td>
<td valign="top" align="center">0.83</td>
<td valign="top" align="center">0.966667</td>
<td valign="top" align="center">0.859091</td>
<td valign="top" align="center">0.860160</td>
<td valign="top" align="center">0.859091</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.865482</td>
<td valign="top" align="center">0.801262</td>
<td valign="top" align="center">0.827243</td>
<td valign="top" align="center">0.941558</td>
<td valign="top" align="center">0.859091</td>
<td valign="top" align="center">0.858886</td>
<td valign="top" align="center">0.857734</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-XGB-VNS</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.823671</td>
<td valign="top" align="center">0.828859</td>
<td valign="top" align="center">0.846690</td>
<td valign="top" align="center">0.906542</td>
<td valign="top" align="center">0.85</td>
<td valign="top" align="center">0.851441</td>
<td valign="top" align="center">0.849058</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.888021</td>
<td valign="top" align="center">0.735119</td>
<td valign="top" align="center">0.81</td>
<td valign="top" align="center">0.97</td>
<td valign="top" align="center">0.85</td>
<td valign="top" align="center">0.850785</td>
<td valign="top" align="center">0.85</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.854637</td>
<td valign="top" align="center">0.779180</td>
<td valign="top" align="center">0.827939</td>
<td valign="top" align="center">0.937198</td>
<td valign="top" align="center">0.85</td>
<td valign="top" align="center">0.849738</td>
<td valign="top" align="center">0.848126</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-XGB-GA</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.854545</td>
<td valign="top" align="center">0.807927</td>
<td valign="top" align="center">0.844523</td>
<td valign="top" align="center">0.895062</td>
<td valign="top" align="center">0.850758</td>
<td valign="top" align="center">0.850514</td>
<td valign="top" align="center">0.849609</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.856771</td>
<td valign="top" align="center">0.788690</td>
<td valign="top" align="center">0.796667</td>
<td valign="top" align="center">0.966667</td>
<td valign="top" align="center">0.850758</td>
<td valign="top" align="center">0.852199</td>
<td valign="top" align="center">0.850758</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.855657</td>
<td valign="top" align="center">0.798193</td>
<td valign="top" align="center">0.819897</td>
<td valign="top" align="center">0.929487</td>
<td valign="top" align="center">0.850758</td>
<td valign="top" align="center">0.850808</td>
<td valign="top" align="center">0.849682</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-XGB-PSO</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.837093</td>
<td valign="top" align="center">0.815873</td>
<td valign="top" align="center">0.838028</td>
<td valign="top" align="center">0.909938</td>
<td valign="top" align="center">0.85</td>
<td valign="top" align="center">0.850233</td>
<td valign="top" align="center">0.848460</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.869792</td>
<td valign="top" align="center">0.764881</td>
<td valign="top" align="center">0.793333</td>
<td valign="top" align="center">0.976667</td>
<td valign="top" align="center">0.85</td>
<td valign="top" align="center">0.851168</td>
<td valign="top" align="center">0.85</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.853129</td>
<td valign="top" align="center">0.789555</td>
<td valign="top" align="center">0.815068</td>
<td valign="top" align="center">0.942122</td>
<td valign="top" align="center">0.85</td>
<td valign="top" align="center">0.849969</td>
<td valign="top" align="center">0.848522</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-XGB-ABC</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.810345</td>
<td valign="top" align="center">0.813725</td>
<td valign="top" align="center">0.819788</td>
<td valign="top" align="center">0.873846</td>
<td valign="top" align="center">0.828788</td>
<td valign="top" align="center">0.829426</td>
<td valign="top" align="center">0.827784</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.856771</td>
<td valign="top" align="center">0.741071</td>
<td valign="top" align="center">0.773333</td>
<td valign="top" align="center">0.946667</td>
<td valign="top" align="center">0.828788</td>
<td valign="top" align="center">0.829461</td>
<td valign="top" align="center">0.828788</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.832911</td>
<td valign="top" align="center">0.775701</td>
<td valign="top" align="center">0.795883</td>
<td valign="top" align="center">0.9088</td>
<td valign="top" align="center">0.828788</td>
<td valign="top" align="center">0.828324</td>
<td valign="top" align="center">0.827181</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-XGB-BA</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.844221</td>
<td valign="top" align="center">0.799392</td>
<td valign="top" align="center">0.861818</td>
<td valign="top" align="center">0.915094</td>
<td valign="top" align="center">0.853788</td>
<td valign="top" align="center">0.855131</td>
<td valign="top" align="center">0.852917</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.875</td>
<td valign="top" align="center">0.782738</td>
<td valign="top" align="center">0.79</td>
<td valign="top" align="center">0.97</td>
<td valign="top" align="center">0.853788</td>
<td valign="top" align="center">0.854435</td>
<td valign="top" align="center">0.853788</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.859335</td>
<td valign="top" align="center">0.790977</td>
<td valign="top" align="center">0.824348</td>
<td valign="top" align="center">0.941748</td>
<td valign="top" align="center">0.853788</td>
<td valign="top" align="center">0.854102</td>
<td valign="top" align="center">0.852713</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-XGB-SCHO</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.843038</td>
<td valign="top" align="center">0.80625</td>
<td valign="top" align="center">0.840278</td>
<td valign="top" align="center">0.911672</td>
<td valign="top" align="center">0.85</td>
<td valign="top" align="center">0.850309</td>
<td valign="top" align="center">0.848645</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.867188</td>
<td valign="top" align="center">0.767857</td>
<td valign="top" align="center">0.806667</td>
<td valign="top" align="center">0.963333</td>
<td valign="top" align="center">0.85</td>
<td valign="top" align="center">0.851261</td>
<td valign="top" align="center">0.85</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.854942</td>
<td valign="top" align="center">0.786585</td>
<td valign="top" align="center">0.823129</td>
<td valign="top" align="center">0.936791</td>
<td valign="top" align="center">0.85</td>
<td valign="top" align="center">0.850362</td>
<td valign="top" align="center">0.848914</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-XGB-EHO</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.85533</td>
<td valign="top" align="center">0.809524</td>
<td valign="top" align="center">0.838488</td>
<td valign="top" align="center">0.896875</td>
<td valign="top" align="center">0.850758</td>
<td valign="top" align="center">0.850054</td>
<td valign="top" align="center">0.849285</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.877604</td>
<td valign="top" align="center">0.758929</td>
<td valign="top" align="center">0.813333</td>
<td valign="top" align="center">0.956667</td>
<td valign="top" align="center">0.850758</td>
<td valign="top" align="center">0.851633</td>
<td valign="top" align="center">0.850758</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.866324</td>
<td valign="top" align="center">0.78341</td>
<td valign="top" align="center">0.825719</td>
<td valign="top" align="center">0.925806</td>
<td valign="top" align="center">0.850758</td>
<td valign="top" align="center">0.850315</td>
<td valign="top" align="center">0.849509</td>
</tr>
<tr>
<td/>
<td valign="top" align="left">Samples</td>
<td valign="top" align="center">384</td>
<td valign="top" align="center">336</td>
<td valign="top" align="center">300</td>
<td valign="top" align="center">300</td>
<td/>
<td/>
<td/>
</tr></tbody>
</table>
</table-wrap>
</sec>
<sec>
<label>5.3</label>
<title>L2 LightGBM</title>
<p><xref ref-type="table" rid="T6">Table 6</xref> presents a comparative evaluation of the L2 LightGBM classification models optimized using several metaheuristic algorithms, with the MCC serving as the main objective function. Among the methods examined, the proposed QSAVNS once again proved to be the most effective optimizer, achieving the highest best-case MCC value of 0.860430 (tied with PSO) and delivering competitive results across the other statistical indicators. The GA algorithm recorded the best worst-case performance (0.804565) and the highest mean (0.840211) and median (0.845163) MCC values. In addition, GA demonstrated exceptional consistency between independent runs, indicating minimal stochastic variability and high overall reliability.</p>
<table-wrap position="float" id="T6">
<label>Table 6</label>
<caption><p>Results of the objective and indicator function evaluations for L2 LightGBM optimization.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Method</bold></th>
<th valign="top" align="center"><bold>Best</bold></th>
<th valign="top" align="center"><bold>Worst</bold></th>
<th valign="top" align="center"><bold>Mean</bold></th>
<th valign="top" align="center"><bold>Median</bold></th>
<th valign="top" align="center"><bold>Std</bold></th>
<th valign="top" align="center"><bold>Var</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">CNN-LGBM-QSAVNS</td>
<td valign="top" align="center">0.860430</td>
<td valign="top" align="center">0.735341</td>
<td valign="top" align="center">0.811917</td>
<td valign="top" align="center">0.815721</td>
<td valign="top" align="center">0.043857</td>
<td valign="top" align="center">0.001923</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-VNS</td>
<td valign="top" align="center">0.840972</td>
<td valign="top" align="center">0.747018</td>
<td valign="top" align="center">0.797422</td>
<td valign="top" align="center">0.803751</td>
<td valign="top" align="center">0.028748</td>
<td valign="top" align="center">0.000826</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-GA</td>
<td valign="top" align="center">0.859326</td>
<td valign="top" align="center">0.804565</td>
<td valign="top" align="center">0.840211</td>
<td valign="top" align="center">0.845163</td>
<td valign="top" align="center">0.015145</td>
<td valign="top" align="center">0.000229</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-PSO</td>
<td valign="top" align="center">0.860430</td>
<td valign="top" align="center">0.761823</td>
<td valign="top" align="center">0.802577</td>
<td valign="top" align="center">0.804399</td>
<td valign="top" align="center">0.025077</td>
<td valign="top" align="center">0.000629</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-ABC</td>
<td valign="top" align="center">0.827912</td>
<td valign="top" align="center">0.685182</td>
<td valign="top" align="center">0.733294</td>
<td valign="top" align="center">0.717491</td>
<td valign="top" align="center">0.043312</td>
<td valign="top" align="center">0.001876</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-BA</td>
<td valign="top" align="center">0.841007</td>
<td valign="top" align="center">0.742303</td>
<td valign="top" align="center">0.789529</td>
<td valign="top" align="center">0.784178</td>
<td valign="top" align="center">0.031955</td>
<td valign="top" align="center">0.001021</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-SCHO</td>
<td valign="top" align="center">0.845986</td>
<td valign="top" align="center">0.748065</td>
<td valign="top" align="center">0.792571</td>
<td valign="top" align="center">0.798346</td>
<td valign="top" align="center">0.027384</td>
<td valign="top" align="center">0.000750</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-EHO</td>
<td valign="top" align="center">0.847054</td>
<td valign="top" align="center">0.712497</td>
<td valign="top" align="center">0.800066</td>
<td valign="top" align="center">0.814890</td>
<td valign="top" align="center">0.041080</td>
<td valign="top" align="center">0.001688</td>
</tr>
<tr>
<td valign="top" align="left" colspan="7"><bold>Error rate</bold></td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-QSAVNS</td>
<td valign="top" align="center">0.104545</td>
<td valign="top" align="center">0.198485</td>
<td valign="top" align="center">0.140808</td>
<td valign="top" align="center">0.137879</td>
<td valign="top" align="center">0.032887</td>
<td valign="top" align="center">0.001082</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-VNS</td>
<td valign="top" align="center">0.118939</td>
<td valign="top" align="center">0.189394</td>
<td valign="top" align="center">0.151616</td>
<td valign="top" align="center">0.146970</td>
<td valign="top" align="center">0.021522</td>
<td valign="top" align="center">0.000463</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-GA</td>
<td valign="top" align="center">0.105303</td>
<td valign="top" align="center">0.146212</td>
<td valign="top" align="center">0.119545</td>
<td valign="top" align="center">0.115909</td>
<td valign="top" align="center">0.011332</td>
<td valign="top" align="center">0.000128</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-PSO</td>
<td valign="top" align="center">0.104545</td>
<td valign="top" align="center">0.178030</td>
<td valign="top" align="center">0.147727</td>
<td valign="top" align="center">0.146212</td>
<td valign="top" align="center">0.018717</td>
<td valign="top" align="center">0.000350</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-ABC</td>
<td valign="top" align="center">0.128788</td>
<td valign="top" align="center">0.235606</td>
<td valign="top" align="center">0.199646</td>
<td valign="top" align="center">0.211364</td>
<td valign="top" align="center">0.032457</td>
<td valign="top" align="center">0.001053</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-BA</td>
<td valign="top" align="center">0.118939</td>
<td valign="top" align="center">0.193182</td>
<td valign="top" align="center">0.157475</td>
<td valign="top" align="center">0.161364</td>
<td valign="top" align="center">0.023966</td>
<td valign="top" align="center">0.000574</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-SCHO</td>
<td valign="top" align="center">0.115152</td>
<td valign="top" align="center">0.188636</td>
<td valign="top" align="center">0.155202</td>
<td valign="top" align="center">0.150758</td>
<td valign="top" align="center">0.020575</td>
<td valign="top" align="center">0.000423</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-EHO</td>
<td valign="top" align="center">0.114394</td>
<td valign="top" align="center">0.215152</td>
<td valign="top" align="center">0.149646</td>
<td valign="top" align="center">0.138636</td>
<td valign="top" align="center">0.030779</td>
<td valign="top" align="center">0.000947</td>
</tr></tbody>
</table>
</table-wrap>
<p><xref ref-type="table" rid="T6">Table 6</xref> also reports a comparative analysis of L2 LightGBM classifiers optimized using different metaheuristic strategies, this time based on the indicator function represented by the error rate. Among all algorithms tested, the proposed QSAVNS achieved the best overall outcome, with the lowest absolute error rate of 0.104545. While QSAVNS also produced competitive results for the remaining metrics, GA achieved the strongest worst-case performance (0.146212), along with the best mean (0.119545) and median (0.115909) error rates. Although GA did not reach the lowest absolute error, it exhibited the greatest consistency across repeated runs, demonstrating outstanding stability despite slightly weaker optimization performance compared to QSAVNS.</p>
<p><xref ref-type="table" rid="T7">Table 7</xref> provides detailed evaluation metrics for the top-performing L2 LightGBM classifiers optimized using the examined metaheuristic algorithms. The findings show that the QSAVNS-optimized model produced the most effective CNN-LightGBM configuration, achieving the highest overall classification accuracy of 0.895455 (matched by PSO) and consistently maintaining high precision, recall, and F1-scores across all evaluated classes. An important conclusion drawn from these results is that the integration of LightGBM into the second layer of the framework substantially enhanced overall accuracy compared to the standalone CNN architectures, while also improving the classification of individual stages of AD. Moreover, the LightGBM-based models in the second layer clearly outperformed their XGBoost counterparts, confirming the superior performance and adaptability of LightGBM within this hierarchical framework.</p>
<table-wrap position="float" id="T7">
<label>Table 7</label>
<caption><p>Comprehensive assessment of the best-performing L2 LightGBM models obtained through the optimization process.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Approach</bold></th>
<th valign="top" align="left"><bold>Metric</bold></th>
<th valign="top" align="center"><bold>0</bold></th>
<th valign="top" align="center"><bold>1</bold></th>
<th valign="top" align="center"><bold>2</bold></th>
<th valign="top" align="center"><bold>3</bold></th>
<th valign="top" align="center"><bold>Accuracy</bold></th>
<th valign="top" align="center"><bold>Macro ave</bold></th>
<th valign="top" align="center"><bold>Weight ave</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" rowspan="3">CNN-LGBM-QSAVNS</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.878173</td>
<td valign="top" align="center">0.880645</td>
<td valign="top" align="center">0.880795</td>
<td valign="top" align="center">0.94586</td>
<td valign="top" align="center">0.895455</td>
<td valign="top" align="center">0.896368</td>
<td valign="top" align="center">0.894781</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.901042</td>
<td valign="top" align="center">0.8125</td>
<td valign="top" align="center">0.886667</td>
<td valign="top" align="center">0.99</td>
<td valign="top" align="center">0.895455</td>
<td valign="top" align="center">0.897552</td>
<td valign="top" align="center">0.895455</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.88946</td>
<td valign="top" align="center">0.845201</td>
<td valign="top" align="center">0.883721</td>
<td valign="top" align="center">0.967427</td>
<td valign="top" align="center">0.895455</td>
<td valign="top" align="center">0.896452</td>
<td valign="top" align="center">0.89461</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-LGBM-VNS</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.861111</td>
<td valign="top" align="center">0.849057</td>
<td valign="top" align="center">0.868687</td>
<td valign="top" align="center">0.951456</td>
<td valign="top" align="center">0.881061</td>
<td valign="top" align="center">0.882578</td>
<td valign="top" align="center">0.880297</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.888021</td>
<td valign="top" align="center">0.803571</td>
<td valign="top" align="center">0.86</td>
<td valign="top" align="center">0.98</td>
<td valign="top" align="center">0.881061</td>
<td valign="top" align="center">0.882898</td>
<td valign="top" align="center">0.881061</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.874359</td>
<td valign="top" align="center">0.825688</td>
<td valign="top" align="center">0.864322</td>
<td valign="top" align="center">0.965517</td>
<td valign="top" align="center">0.881061</td>
<td valign="top" align="center">0.882471</td>
<td valign="top" align="center">0.880407</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-LGBM-GA</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.890374</td>
<td valign="top" align="center">0.854103</td>
<td valign="top" align="center">0.884106</td>
<td valign="top" align="center">0.952381</td>
<td valign="top" align="center">0.894697</td>
<td valign="top" align="center">0.895241</td>
<td valign="top" align="center">0.89381</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.867188</td>
<td valign="top" align="center">0.83631</td>
<td valign="top" align="center">0.89</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">0.894697</td>
<td valign="top" align="center">0.898374</td>
<td valign="top" align="center">0.894697</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.878628</td>
<td valign="top" align="center">0.845113</td>
<td valign="top" align="center">0.887043</td>
<td valign="top" align="center">0.97561</td>
<td valign="top" align="center">0.894697</td>
<td valign="top" align="center">0.896598</td>
<td valign="top" align="center">0.894051</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-LGBM-PSO</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.878173</td>
<td valign="top" align="center">0.880645</td>
<td valign="top" align="center">0.880795</td>
<td valign="top" align="center">0.94586</td>
<td valign="top" align="center">0.895455</td>
<td valign="top" align="center">0.896368</td>
<td valign="top" align="center">0.894781</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.901042</td>
<td valign="top" align="center">0.8125</td>
<td valign="top" align="center">0.886667</td>
<td valign="top" align="center">0.99</td>
<td valign="top" align="center">0.895455</td>
<td valign="top" align="center">0.897552</td>
<td valign="top" align="center">0.895455</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.88946</td>
<td valign="top" align="center">0.845201</td>
<td valign="top" align="center">0.883721</td>
<td valign="top" align="center">0.967427</td>
<td valign="top" align="center">0.895455</td>
<td valign="top" align="center">0.896452</td>
<td valign="top" align="center">0.89461</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-LGBM-ABC</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.866324</td>
<td valign="top" align="center">0.828571</td>
<td valign="top" align="center">0.850993</td>
<td valign="top" align="center">0.93949</td>
<td valign="top" align="center">0.871212</td>
<td valign="top" align="center">0.871345</td>
<td valign="top" align="center">0.869859</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.877604</td>
<td valign="top" align="center">0.776786</td>
<td valign="top" align="center">0.856667</td>
<td valign="top" align="center">0.983333</td>
<td valign="top" align="center">0.871212</td>
<td valign="top" align="center">0.873597</td>
<td valign="top" align="center">0.871212</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.871928</td>
<td valign="top" align="center">0.801843</td>
<td valign="top" align="center">0.853821</td>
<td valign="top" align="center">0.960912</td>
<td valign="top" align="center">0.871212</td>
<td valign="top" align="center">0.872126</td>
<td valign="top" align="center">0.870196</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-LGBM-BA</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.868895</td>
<td valign="top" align="center">0.824451</td>
<td valign="top" align="center">0.876667</td>
<td valign="top" align="center">0.958333</td>
<td valign="top" align="center">0.881061</td>
<td valign="top" align="center">0.882087</td>
<td valign="top" align="center">0.879675</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.880208</td>
<td valign="top" align="center">0.782738</td>
<td valign="top" align="center">0.876667</td>
<td valign="top" align="center">0.996667</td>
<td valign="top" align="center">0.881061</td>
<td valign="top" align="center">0.88407</td>
<td valign="top" align="center">0.881061</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.874515</td>
<td valign="top" align="center">0.803053</td>
<td valign="top" align="center">0.876667</td>
<td valign="top" align="center">0.977124</td>
<td valign="top" align="center">0.881061</td>
<td valign="top" align="center">0.88284</td>
<td valign="top" align="center">0.880134</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-LGBM-SCHO</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.862694</td>
<td valign="top" align="center">0.83792</td>
<td valign="top" align="center">0.888889</td>
<td valign="top" align="center">0.958065</td>
<td valign="top" align="center">0.884848</td>
<td valign="top" align="center">0.886892</td>
<td valign="top" align="center">0.884017</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.867188</td>
<td valign="top" align="center">0.815476</td>
<td valign="top" align="center">0.88</td>
<td valign="top" align="center">0.99</td>
<td valign="top" align="center">0.884848</td>
<td valign="top" align="center">0.888166</td>
<td valign="top" align="center">0.884848</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.864935</td>
<td valign="top" align="center">0.826546</td>
<td valign="top" align="center">0.884422</td>
<td valign="top" align="center">0.97377</td>
<td valign="top" align="center">0.884848</td>
<td valign="top" align="center">0.887418</td>
<td valign="top" align="center">0.884328</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-LGBM-EHO</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.859649</td>
<td valign="top" align="center">0.861635</td>
<td valign="top" align="center">0.874576</td>
<td valign="top" align="center">0.954545</td>
<td valign="top" align="center">0.885606</td>
<td valign="top" align="center">0.887602</td>
<td valign="top" align="center">0.885115</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.893229</td>
<td valign="top" align="center">0.815476</td>
<td valign="top" align="center">0.86</td>
<td valign="top" align="center">0.98</td>
<td valign="top" align="center">0.885606</td>
<td valign="top" align="center">0.887176</td>
<td valign="top" align="center">0.885606</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.876117</td>
<td valign="top" align="center">0.83792</td>
<td valign="top" align="center">0.867227</td>
<td valign="top" align="center">0.967105</td>
<td valign="top" align="center">0.885606</td>
<td valign="top" align="center">0.887093</td>
<td valign="top" align="center">0.885053</td>
</tr>
<tr>
<td/>
<td valign="top" align="left">Samples</td>
<td valign="top" align="center">384</td>
<td valign="top" align="center">336</td>
<td valign="top" align="center">300</td>
<td valign="top" align="center">300</td>
<td/>
<td/>
<td/>
</tr></tbody>
</table>
</table-wrap>
</sec>
<sec>
<label>5.4</label>
<title>Visual comparative analysis</title>
<p><xref ref-type="fig" rid="F2">Figure 2</xref> provides a detailed comparative analysis of different metaheuristic optimizers applied to fine-tune both hierarchical layers of the proposed classification framework for the identification of stages of AD. The evaluation covers three experimental setups: L1 CNN optimization (top row), L2 XGBoost optimization (middle row), and L2 LightGBM optimization (bottom row). To ensure statistical robustness, the performance of each optimizer was assessed in 30 independent runs, with the distributions of the MCC values illustrated through box plots. Thirty separate runs were necessary to obtain statistically meaningful results and reduce the influence of randomness inherent in metaheuristic algorithms (<xref ref-type="bibr" rid="B70">Talbi, 2009</xref>). These visualizations emphasize central tendencies (medians), variability, and asymmetry in distribution, offering a clear perspective on the balance between stability and exploratory dynamics exhibited by each optimization approach.</p>
<fig position="float" id="F2">
<label>Figure 2</label>
<caption><p>Box plots of the objective and indicator function results for the L1 CNN <bold>(top)</bold>, L2 XGBoost <bold>(middle)</bold>, and L2 LightGBM <bold>(bottom)</bold> experiments.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fncom-20-1731812-g0002.tif">
<alt-text content-type="machine-generated">Box plots comparing algorithms for Alzheimer's disease stage detection. The left column shows objective values, indicating performance. The right column shows error rates, reflecting accuracy. Three rows compare CNN, CNN-XGB, and CNN-LGBM algorithms. Variations in box heights and positions highlight algorithm differences.</alt-text>
</graphic>
</fig>
<p>The results reveal that the proposed QSAVNS consistently achieved the highest best-case MCC values in all three experimental stages, confirming its strong global search capability. Additionally, the distribution of its results shows stable median values combined with slightly wider variance, indicating effective exploration that prevents premature convergence to local optima. This broader dispersion in MCC outcomes reflects a deliberate design trade-off, where superior best-run performance was obtained at the expense of slightly lower overall stability.</p>
<p>The box plots also summarize the statistical behavior of error rates collected from 30 independent optimization runs, illustrating both the central tendency and variability for each algorithm. These graphs are particularly valuable for assessing model generalization: lower median error rates coupled with narrower interquartile ranges correspond to more consistent and reliable predictive outcomes. Across all configurations, the proposed QSAVNS algorithm consistently achieved the lowest error rates, confirming its ability to preserve population diversity while efficiently exploiting promising regions of the search space. This balance effectively minimizes the risk of premature convergence and reduces misclassification tendencies.</p>
<p>The complementary convergence plots shown in <xref ref-type="fig" rid="F3">Figure 3</xref> provide additional insight into the temporal progression of the optimization process, illustrating how each algorithm improves the objective function across successive iterations during their best-performing runs. The proposed QSAVNS demonstrates faster and more stable convergence behavior, which can be attributed to its adaptive features, including the QRL-based initialization and the integrated stagnation-aware rollback mechanism. These components foster a balanced interaction between exploration and exploitation, ensuring consistent progress throughout the search process. In contrast, alternative optimization algorithms often exhibit slower performance gains or early stagnation, reflecting reduced effectiveness in navigating complex, high-dimensional hyperparameter spaces.</p>
<fig position="float" id="F3">
<label>Figure 3</label>
<caption><p>Convergence plots of the objective and indicator functions for the L1 CNN <bold>(top)</bold>, L2 XGBoost <bold>(middle)</bold>, and L2 LightGBM <bold>(bottom)</bold> experiments.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fncom-20-1731812-g0003.tif">
<alt-text content-type="machine-generated">Six line graphs display convergence information for Alzheimer&#x02019;s Disease stage detection using CNN. The graphs show objective and error metrics over iterations for different models: CNN, CNN-XGB, and CNN-LGBM, each with variations. Lines illustrate performance with specific legend labels for easy interpretation. Each graph tracks either objective or error changes across iterations.</alt-text>
</graphic>
</fig>
<p>In general, these findings highlight the decisive impact of the algorithmic structure on the efficiency of optimization in classification tasks. Approaches that maintain population diversity, enable structured exploration of neighboring regions, and dynamically regulate diversity during the search exert a significant influence on the convergence rate, solution quality, and reproducibility. Such characteristics are particularly critical in real-world applications, where sensitivity to initialization settings and variations in input data can substantially affect predictive stability and reliability.</p>
<p><xref ref-type="fig" rid="F4">Figure 4</xref> presents radar charts that summarize both macro and weighted-averaged results, offering a comprehensive depiction of classifier performance across multiple evaluation metrics. The macro-average treats all classes equally, making it particularly useful for assessing a model&#x00027;s ability to correctly recognize minority classes, an especially challenging aspect in scenarios characterized by class imbalance. In contrast, the weighted average accounts for the frequency of the classes, producing a metric that reflects the overall class distribution within the dataset and assigns greater importance to the performance of the majority classes.</p>
<fig position="float" id="F4">
<label>Figure 4</label>
<caption><p>Radar charts illustrating macro and weighted-average evaluation metrics for the L1 CNN <bold>(top)</bold>, L2 XGBoost <bold>(middle)</bold>, and L2 LightGBM <bold>(bottom)</bold> simulations.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fncom-20-1731812-g0004.tif">
<alt-text content-type="machine-generated">Radar charts displaying performance metrics with precision, recall, and F1-score on radial axes. The top left chart shows macro average comparisons for various models, including CNN-QSAVNS, CNN-ABC, etc. The top right chart reflects weighted averages for the same models. The middle row compares CNN-XGB variations, and the bottom row shows CNN-LGBM versions, with macro average on the left and weighted average on the right. Lines in different colors represent different model variations.</alt-text>
</graphic>
</fig>
<p>Displaying these two perspectives side by side reveals the inherent trade-offs among the different optimization algorithms. Models that achieve high weighted-average values may still face challenges in generalizing to minority classes, whereas those demonstrating stronger macro-average results tend to exhibit greater resilience and robustness in imbalanced data contexts. Together, the radar plots provide a complementary means of analysis, supporting a more nuanced evaluation of generalization capability, fairness, and reliability of the classifiers optimized using the metaheuristic approaches examined.</p>
</sec>
<sec>
<label>5.5</label>
<title>Discussion</title>
<p>In the first stage of the proposed framework, CNN functions as a feature extraction engine, capturing hierarchical and discriminative representations from MRI data. However, the conventional practice of relying on a dense output layer for classification within CNN architectures often fails to fully exploit the richness of the extracted features, resulting in suboptimal predictive performance. Replacement of this terminal layer with advanced ensemble classifiers substantially enhances the accuracy and robustness of the model. Following this principle, the proposed framework substitutes the CNN&#x00027;s dense layer with XGBoost and LightGBM classifiers, both of which demonstrate superior performance relative to the dense-layer baseline. By integrating CNN-based deep feature learning with gradient boosting techniques for classification and refining both levels through metaheuristic-driven hyperparameter optimization, the framework effectively combines the strengths of deep representation learning and ensemble-based decision making. This hybrid configuration leads to marked improvements in predictive accuracy and computational efficiency for the classification of stages of Alzheimer&#x00027;s disease, and both L2 models outperform the baseline CNN in terms of classification accuracy.</p>
<p>Analysis of the fitness function, expressed through the MCC, shows that models incorporating LightGBM in the second layer (L2) consistently outperform those utilizing XGBoost. Both ensemble-based configurations exceed the CNN baseline in the first layer (L1). The box plot analyses further reveal that L2 LightGBM achieves the highest median and maximum MCC values, confirming its superior capacity to identify subtle and complex discriminative features critical for accurate stage differentiation. Additionally, the convergence patterns of LightGBM display smooth and stable optimization behavior across all metaheuristic algorithms. This effect is most evident when optimized using QSAVNS, where LightGBM attains the highest recorded MCC values, substantially reinforcing the discriminative capacity of the framework for this clinically important classification problem.</p>
<p>A similar trend is observed for the indicator function, represented by the error rate, where lower values correspond to better performance. Across all three experimental configurations, LightGBM in the L2 layer consistently achieves the lowest error rates, often by a significant margin. The best-performing LightGBM configuration, CNN-LGBM-QSAVNS, achieved the highest overall classification accuracy of 0.895455. Although XGBoost in L2 also produced strong and competitive results, LightGBM demonstrated superior stability and generalization among different metaheuristic optimizers. Taken together, these results establish LightGBM as the most suitable choice for the second layer of the proposed AD stage classification framework, combining high predictive accuracy, low error rates, and consistent performance, qualities essential for reliable clinical implementation.</p>
<p>From the perspective of general optimization theory, the coupling of metaheuristic optimization with ensemble learning aligns with the principles of adaptive search in complex, high-dimensional search spaces. Metaheuristic algorithms are particularly effective in navigating non-convex and discontinuous objective spaces, where gradient-based or deterministic tuning methods often fail. When metaheuristics are combined with ensemble models like XGBoost and LightGBM in this study (which themselves rely on aggregating multiple weak learners), the optimization process benefits from complementary mechanisms of exploration and exploitation at both the parameter-search and decision-fusion levels. This synergy is consistent with the No Free Lunch (NFL) theorem, which suggests that performance gains arise not from universally optimal algorithms, but from well-matched combinations of optimization strategies and learning models tailored to a specific problem domain.</p>
<p>To facilitate reproducibility of experimental results, the hyperparameter configurations for the best-performing models, L1 CNN, L2 XGBoost, and L2 LightGBM, are summarized in <xref ref-type="table" rid="T8">Table 8</xref>.</p>
<table-wrap-group position="float" id="T8">
<label>Table 8</label>
<caption><p>Selected hyperparameter configurations for the best-performing L1 CNN, L2 XGBoost, and L2 LightGBM architectures.</p></caption>
<table-wrap>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>L1 CNN</bold></th>
<th valign="top" align="center"><bold>LR</bold></th>
<th valign="top" align="center"><bold>Drop</bold></th>
<th valign="top" align="center"><bold>Epochs</bold></th>
<th valign="top" align="center"><bold>CNN-L</bold></th>
<th valign="top" align="center"><bold>Dense-L</bold></th>
<th valign="top" align="center"><bold>CNN1</bold></th>
<th valign="top" align="center"><bold>CNN2</bold></th>
<th valign="top" align="center"><bold>DL1</bold></th>
<th valign="top" align="center"><bold>DL2</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">CNN-QSAVNS</td>
<td valign="top" align="center">0.0013</td>
<td valign="top" align="center">0.1618</td>
<td valign="top" align="center">29</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">43</td>
<td valign="top" align="center">N/A</td>
<td valign="top" align="center">96</td>
<td valign="top" align="center">N/A</td>
</tr>
<tr>
<td valign="top" align="left">CNN-VNS</td>
<td valign="top" align="center">0.0002</td>
<td valign="top" align="center">0.1913</td>
<td valign="top" align="center">30</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">32</td>
<td valign="top" align="center">N/A</td>
<td valign="top" align="center">41</td>
<td valign="top" align="center">N/A</td>
</tr>
<tr>
<td valign="top" align="left">CNN-GA</td>
<td valign="top" align="center">0.001</td>
<td valign="top" align="center">0.0761</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">68</td>
<td valign="top" align="center">N/A</td>
<td valign="top" align="center">51</td>
<td valign="top" align="center">N/A</td>
</tr>
<tr>
<td valign="top" align="left">CNN-PSO</td>
<td valign="top" align="center">0.0009</td>
<td valign="top" align="center">0.0857</td>
<td valign="top" align="center">30</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">48</td>
<td valign="top" align="center">N/A</td>
<td valign="top" align="center">96</td>
<td valign="top" align="center">N/A</td>
</tr>
<tr>
<td valign="top" align="left">CNN-ABC</td>
<td valign="top" align="center">0.0005</td>
<td valign="top" align="center">0.1941</td>
<td valign="top" align="center">26</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">54</td>
<td valign="top" align="center">N/A</td>
<td valign="top" align="center">89</td>
<td valign="top" align="center">N/A</td>
</tr>
<tr>
<td valign="top" align="left">CNN-BA</td>
<td valign="top" align="center">0.0005</td>
<td valign="top" align="center">0.1407</td>
<td valign="top" align="center">30</td>
<td valign="top" align="center">2</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">69</td>
<td valign="top" align="center">63</td>
<td valign="top" align="center">90</td>
<td valign="top" align="center">N/A</td>
</tr>
<tr>
<td valign="top" align="left">CNN-SCHO</td>
<td valign="top" align="center">0.0021</td>
<td valign="top" align="center">0.05</td>
<td valign="top" align="center">30</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">39</td>
<td valign="top" align="center">N/A</td>
<td valign="top" align="center">36</td>
<td valign="top" align="center">N/A</td>
</tr>
<tr>
<td valign="top" align="left">CNN-EHO</td>
<td valign="top" align="center">0.0009</td>
<td valign="top" align="center">0.1372</td>
<td valign="top" align="center">13</td>
<td valign="top" align="center">2</td>
<td valign="top" align="center">2</td>
<td valign="top" align="center">52</td>
<td valign="top" align="center">69</td>
<td valign="top" align="center">93</td>
<td valign="top" align="center">32</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>L2 XGBoost</bold></th>
<th valign="top" align="center"><bold>LR</bold></th>
<th valign="top" align="center"><bold>MCW</bold></th>
<th valign="top" align="center"><bold>Sub-sample</bold></th>
<th valign="top" align="center"><bold>Co-sample</bold></th>
<th valign="top" align="center"><bold>Max depth</bold></th>
<th valign="top" align="center">&#x003B3;</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">CNN-XGB-QSAVNS</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">0.8418</td>
<td valign="top" align="center">5</td>
<td valign="top" align="center">0.4992</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-VNS</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">1.4154</td>
<td valign="top" align="center">0.9191</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">5</td>
<td valign="top" align="center">0.1318</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-GA</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">0.9107</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">5</td>
<td valign="top" align="center">0.6213</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-PSO</td>
<td valign="top" align="center">0.8547</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">0.8005</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">5</td>
<td valign="top" align="center">0.1726</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-ABC</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">2.3408</td>
<td valign="top" align="center">0.7818</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">5</td>
<td valign="top" align="center">0.3861</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-BA</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">0.9072</td>
<td valign="top" align="center">5</td>
<td valign="top" align="center">0.0476</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-SCHO</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">1.2104</td>
<td valign="top" align="center">0.883</td>
<td valign="top" align="center">0.6711</td>
<td valign="top" align="center">5</td>
<td valign="top" align="center">0</td>
</tr>
<tr>
<td valign="top" align="left">CNN-XGB-EHO</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">5</td>
<td valign="top" align="center">0.1096</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>L2 LightGBM</bold></th>
<th valign="top" align="center"><bold>Rounds</bold></th>
<th valign="top" align="center"><bold>Max depth</bold></th>
<th valign="top" align="center"><bold>Leaves</bold></th>
<th valign="top" align="center"><bold>MCW</bold></th>
<th valign="top" align="center"><bold>FF</bold></th>
<th valign="top" align="center"><bold>BF</bold></th>
<th valign="top" align="center"><bold>MSG</bold></th>
<th valign="top" align="center">&#x003BB; <bold>L1</bold></th>
<th valign="top" align="center">&#x003BB; <bold>L2</bold></th>
<th valign="top" align="center"><bold>lr</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">CNN-LGBM-QSAVNS</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">10</td>
<td valign="top" align="center">10</td>
<td valign="top" align="center">4</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">0.1</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.9</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-VNS</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">9</td>
<td valign="top" align="center">10</td>
<td valign="top" align="center">3</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">0.5</td>
<td valign="top" align="center">0.0022</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">1.0995</td>
<td valign="top" align="center">0.898</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-GA</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">8</td>
<td valign="top" align="center">10</td>
<td valign="top" align="center">2</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">0.614</td>
<td valign="top" align="center">0.0792</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.1935</td>
<td valign="top" align="center">0.8966</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-PSO</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">10</td>
<td valign="top" align="center">10</td>
<td valign="top" align="center">4</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">0.5</td>
<td valign="top" align="center">0.0997</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.9</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-ABC</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">10</td>
<td valign="top" align="center">10</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">0.7564</td>
<td valign="top" align="center">0.0678</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.7786</td>
<td valign="top" align="center">0.8659</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-BA</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">6</td>
<td valign="top" align="center">10</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">0.5</td>
<td valign="top" align="center">0.0831</td>
<td valign="top" align="center">0.3253</td>
<td valign="top" align="center">0.3123</td>
<td valign="top" align="center">0.9</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-SCHO</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">10</td>
<td valign="top" align="center">10</td>
<td valign="top" align="center">2</td>
<td valign="top" align="center">0.8696</td>
<td valign="top" align="center">0.8579</td>
<td valign="top" align="center">0.0796</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.8734</td>
</tr>
<tr>
<td valign="top" align="left">CNN-LGBM-EHO</td>
<td valign="top" align="center">20</td>
<td valign="top" align="center">10</td>
<td valign="top" align="center">10</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">0.9</td>
<td valign="top" align="center">1</td>
<td valign="top" align="center">0.1</td>
<td valign="top" align="center">1.2477</td>
<td valign="top" align="center">0</td>
<td valign="top" align="center">0.9</td>
</tr>
</tbody>
</table>
</table-wrap>
</table-wrap-group>
</sec>
</sec>
<sec id="s6">
<label>6</label>
<title>Validation and interpretation</title>
<sec>
<label>6.1</label>
<title>Comparisons to baselines</title>
<p>To further evaluate the performance of the proposed framework, the best second-layer (L2) models were compared with a set of well-established benchmark classifiers. The benchmark suite included a multi-layer perceptron (MLP), decision tree (DT) (<xref ref-type="bibr" rid="B17">de Ville, 2013</xref>), k-nearest neighbors (KNN) (<xref ref-type="bibr" rid="B35">Kramer, 2013</xref>), random forest (RF) (<xref ref-type="bibr" rid="B12">Breiman, 2001</xref>), and several boosting algorithms, AdaBoost, CatBoost, plain LightGBM (<xref ref-type="bibr" rid="B33">Ke et al., 2017</xref>), and plain XGBoost (<xref ref-type="bibr" rid="B14">Chen and Guestrin, 2016</xref>), as well as a deep CNN (<xref ref-type="bibr" rid="B23">Gu et al., 2018</xref>). All baseline classifiers were trained and tested using their default hyperparameter configurations, and the resulting evaluation metrics are summarized in <xref ref-type="table" rid="T9">Table 9</xref>.</p>
<table-wrap position="float" id="T9">
<label>Table 9</label>
<caption><p>Comparison of the best-performing L2 models with baseline classification algorithms.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Approach</bold></th>
<th valign="top" align="left"><bold>Metric</bold></th>
<th valign="top" align="center"><bold>0</bold></th>
<th valign="top" align="center"><bold>1</bold></th>
<th valign="top" align="center"><bold>2</bold></th>
<th valign="top" align="center"><bold>3</bold></th>
<th valign="top" align="center"><bold>Accuracy</bold></th>
<th valign="top" align="center"><bold>Macro ave</bold></th>
<th valign="top" align="center"><bold>Weight ave</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" rowspan="3">CNN-XGB-QSAVNS</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.844059</td>
<td valign="top" align="center">0.852349</td>
<td valign="top" align="center">0.824503</td>
<td valign="top" align="center">0.917722</td>
<td valign="top" align="center">0.859091</td>
<td valign="top" align="center">0.859658</td>
<td valign="top" align="center">0.858466</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.888021</td>
<td valign="top" align="center">0.755952</td>
<td valign="top" align="center">0.83</td>
<td valign="top" align="center">0.966667</td>
<td valign="top" align="center">0.859091</td>
<td valign="top" align="center">0.860160</td>
<td valign="top" align="center">0.859091</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.865482</td>
<td valign="top" align="center">0.801262</td>
<td valign="top" align="center">0.827243</td>
<td valign="top" align="center">0.941558</td>
<td valign="top" align="center">0.859091</td>
<td valign="top" align="center">0.858886</td>
<td valign="top" align="center">0.857734</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN-LGBM-QSAVNS</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.878173</td>
<td valign="top" align="center">0.880645</td>
<td valign="top" align="center">0.880795</td>
<td valign="top" align="center">0.94586</td>
<td valign="top" align="center">0.895455</td>
<td valign="top" align="center">0.896368</td>
<td valign="top" align="center">0.894781</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.901042</td>
<td valign="top" align="center">0.8125</td>
<td valign="top" align="center">0.886667</td>
<td valign="top" align="center">0.99</td>
<td valign="top" align="center">0.895455</td>
<td valign="top" align="center">0.897552</td>
<td valign="top" align="center">0.895455</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.88946</td>
<td valign="top" align="center">0.845201</td>
<td valign="top" align="center">0.883721</td>
<td valign="top" align="center">0.967427</td>
<td valign="top" align="center">0.895455</td>
<td valign="top" align="center">0.896452</td>
<td valign="top" align="center">0.89461</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">MLP</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.633609</td>
<td valign="top" align="center">0.395189</td>
<td valign="top" align="center">0.408537</td>
<td valign="top" align="center">0.704142</td>
<td valign="top" align="center">0.543182</td>
<td valign="top" align="center">0.535369</td>
<td valign="top" align="center">0.537798</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.598958</td>
<td valign="top" align="center">0.342262</td>
<td valign="top" align="center">0.446667</td>
<td valign="top" align="center">0.793333</td>
<td valign="top" align="center">0.543182</td>
<td valign="top" align="center">0.545305</td>
<td valign="top" align="center">0.543182</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.615797</td>
<td valign="top" align="center">0.366826</td>
<td valign="top" align="center">0.426752</td>
<td valign="top" align="center">0.746082</td>
<td valign="top" align="center">0.543182</td>
<td valign="top" align="center">0.538864</td>
<td valign="top" align="center">0.539068</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">DT</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.505</td>
<td valign="top" align="center">0.329446</td>
<td valign="top" align="center">0.394265</td>
<td valign="top" align="center">0.647651</td>
<td valign="top" align="center">0.468182</td>
<td valign="top" align="center">0.469091</td>
<td valign="top" align="center">0.467567</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.526042</td>
<td valign="top" align="center">0.33631</td>
<td valign="top" align="center">0.366667</td>
<td valign="top" align="center">0.643333</td>
<td valign="top" align="center">0.468182</td>
<td valign="top" align="center">0.468088</td>
<td valign="top" align="center">0.468182</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.515306</td>
<td valign="top" align="center">0.332842</td>
<td valign="top" align="center">0.379965</td>
<td valign="top" align="center">0.645485</td>
<td valign="top" align="center">0.468182</td>
<td valign="top" align="center">0.4684</td>
<td valign="top" align="center">0.467688</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">KNN</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.527542</td>
<td valign="top" align="center">0.353801</td>
<td valign="top" align="center">0.407563</td>
<td valign="top" align="center">0.738806</td>
<td valign="top" align="center">0.503788</td>
<td valign="top" align="center">0.506928</td>
<td valign="top" align="center">0.504064</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.648438</td>
<td valign="top" align="center">0.360119</td>
<td valign="top" align="center">0.323333</td>
<td valign="top" align="center">0.66</td>
<td valign="top" align="center">0.503788</td>
<td valign="top" align="center">0.497972</td>
<td valign="top" align="center">0.503788</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.581776</td>
<td valign="top" align="center">0.356932</td>
<td valign="top" align="center">0.360595</td>
<td valign="top" align="center">0.697183</td>
<td valign="top" align="center">0.503788</td>
<td valign="top" align="center">0.499121</td>
<td valign="top" align="center">0.500503</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">RF</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.594937</td>
<td valign="top" align="center">0.423567</td>
<td valign="top" align="center">0.460993</td>
<td valign="top" align="center">0.68693</td>
<td valign="top" align="center">0.548485</td>
<td valign="top" align="center">0.541607</td>
<td valign="top" align="center">0.541781</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.611979</td>
<td valign="top" align="center">0.395833</td>
<td valign="top" align="center">0.433333</td>
<td valign="top" align="center">0.753333</td>
<td valign="top" align="center">0.548485</td>
<td valign="top" align="center">0.54862</td>
<td valign="top" align="center">0.548485</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.603338</td>
<td valign="top" align="center">0.409231</td>
<td valign="top" align="center">0.446735</td>
<td valign="top" align="center">0.718601</td>
<td valign="top" align="center">0.548485</td>
<td valign="top" align="center">0.544476</td>
<td valign="top" align="center">0.544533</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">LGBM</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.638356</td>
<td valign="top" align="center">0.387755</td>
<td valign="top" align="center">0.433657</td>
<td valign="top" align="center">0.739274</td>
<td valign="top" align="center">0.548485</td>
<td valign="top" align="center">0.549761</td>
<td valign="top" align="center">0.55098</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.606771</td>
<td valign="top" align="center">0.395833</td>
<td valign="top" align="center">0.446667</td>
<td valign="top" align="center">0.746667</td>
<td valign="top" align="center">0.548485</td>
<td valign="top" align="center">0.548984</td>
<td valign="top" align="center">0.548485</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.622163</td>
<td valign="top" align="center">0.391753</td>
<td valign="top" align="center">0.440066</td>
<td valign="top" align="center">0.742952</td>
<td valign="top" align="center">0.548485</td>
<td valign="top" align="center">0.549233</td>
<td valign="top" align="center">0.549579</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CB</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.606299</td>
<td valign="top" align="center">0.389571</td>
<td valign="top" align="center">0.451724</td>
<td valign="top" align="center">0.708978</td>
<td valign="top" align="center">0.543939</td>
<td valign="top" align="center">0.539143</td>
<td valign="top" align="center">0.539337</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.601563</td>
<td valign="top" align="center">0.377976</td>
<td valign="top" align="center">0.436667</td>
<td valign="top" align="center">0.763333</td>
<td valign="top" align="center">0.543939</td>
<td valign="top" align="center">0.544885</td>
<td valign="top" align="center">0.543939</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.603922</td>
<td valign="top" align="center">0.383686</td>
<td valign="top" align="center">0.444068</td>
<td valign="top" align="center">0.735152</td>
<td valign="top" align="center">0.543939</td>
<td valign="top" align="center">0.541707</td>
<td valign="top" align="center">0.541356</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">XGBoost</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.594737</td>
<td valign="top" align="center">0.400612</td>
<td valign="top" align="center">0.439344</td>
<td valign="top" align="center">0.737013</td>
<td valign="top" align="center">0.543939</td>
<td valign="top" align="center">0.542926</td>
<td valign="top" align="center">0.542342</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.588542</td>
<td valign="top" align="center">0.389881</td>
<td valign="top" align="center">0.446667</td>
<td valign="top" align="center">0.756667</td>
<td valign="top" align="center">0.543939</td>
<td valign="top" align="center">0.545439</td>
<td valign="top" align="center">0.543939</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.591623</td>
<td valign="top" align="center">0.395173</td>
<td valign="top" align="center">0.442975</td>
<td valign="top" align="center">0.746711</td>
<td valign="top" align="center">0.543939</td>
<td valign="top" align="center">0.544121</td>
<td valign="top" align="center">0.543081</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">AdaBoost</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.532632</td>
<td valign="top" align="center">0.396154</td>
<td valign="top" align="center">0.408889</td>
<td valign="top" align="center">0.641667</td>
<td valign="top" align="center">0.514394</td>
<td valign="top" align="center">0.494835</td>
<td valign="top" align="center">0.494549</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.658854</td>
<td valign="top" align="center">0.306548</td>
<td valign="top" align="center">0.306667</td>
<td valign="top" align="center">0.77</td>
<td valign="top" align="center">0.514394</td>
<td valign="top" align="center">0.510517</td>
<td valign="top" align="center">0.514394</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.589057</td>
<td valign="top" align="center">0.345638</td>
<td valign="top" align="center">0.350476</td>
<td valign="top" align="center">0.7</td>
<td valign="top" align="center">0.514394</td>
<td valign="top" align="center">0.496293</td>
<td valign="top" align="center">0.498087</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">CNN</td>
<td valign="top" align="left">Precision</td>
<td valign="top" align="center">0.489583</td>
<td valign="top" align="center">0.388646</td>
<td valign="top" align="center">0.445545</td>
<td valign="top" align="center">0.709265</td>
<td valign="top" align="center">0.517424</td>
<td valign="top" align="center">0.50826</td>
<td valign="top" align="center">0.503809</td>
</tr>
<tr>
<td valign="top" align="left">Recall</td>
<td valign="top" align="center">0.734375</td>
<td valign="top" align="center">0.264881</td>
<td valign="top" align="center">0.3</td>
<td valign="top" align="center">0.74</td>
<td valign="top" align="center">0.517424</td>
<td valign="top" align="center">0.509814</td>
<td valign="top" align="center">0.517424</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.5875</td>
<td valign="top" align="center">0.315044</td>
<td valign="top" align="center">0.358566</td>
<td valign="top" align="center">0.724307</td>
<td valign="top" align="center">0.517424</td>
<td valign="top" align="center">0.496354</td>
<td valign="top" align="center">0.49721</td>
</tr>
<tr>
<td/>
<td valign="top" align="left">Samples</td>
<td valign="top" align="center">384</td>
<td valign="top" align="center">336</td>
<td valign="top" align="center">300</td>
<td valign="top" align="center">300</td>
<td/>
<td/>
<td/>
</tr></tbody>
</table>
</table-wrap>
<p>Although the benchmark models demonstrated generally solid accuracy, the proposed L2 architectures consistently outperformed them in all evaluation criteria, achieving superior class-wise results and substantially higher overall accuracy. The best-performing model, CNN-LGBM-QSAVNS, achieved an accuracy of 0.895455, followed by CNN-XGB-QSAVNS with 0.859091. In comparison, the best-performing baseline models, plain LightGBM and random forest, each reached a considerably lower accuracy of 0.548485 under the same experimental conditions.</p></sec>
<sec>
<label>6.2</label>
<title>Statistical analysis</title>
<p>Although comparative analysis of optimization algorithms can offer valuable information, conclusions drawn from a single execution are inherently unreliable. The stochastic nature of metaheuristic methods introduces significant variability between runs, rendering single-instance results insufficient to accurately evaluate overall performance. To mitigate randomness and improve the robustness of the evaluation, each algorithm in this study was executed 30 times with independent random seeds. This procedure yielded comprehensive distributions of the results, providing a statistically sound foundation for comparison. Such a multi-run evaluation protocol not only strengthens statistical validity but also enables more accurate identification of performance trends. In addition, this methodology aligns with widely accepted best practices for benchmarking metaheuristic algorithms (<xref ref-type="bibr" rid="B38">LaTorre et al., 2021</xref>), thus improving both the credibility and reproducibility of the study&#x00027;s results.</p>
<p>Statistical procedures for determining the significance of performance differences among groups are generally divided into parametric and non-parametric tests. The choice between them depends on assumptions such as the independence of observations, normality of the data distribution, and equality of variances between groups (homoscedasticity) (<xref ref-type="bibr" rid="B38">LaTorre et al., 2021</xref>). Independence was ensured by initializing each algorithmic run with a distinct random seed, preventing inter-run dependencies. Homoscedasticity was examined using the Levene test (<xref ref-type="bibr" rid="B65">Schultz, 1985</xref>), which produced a <italic>p</italic>-value of 0.88 for all experimental results, indicating that there are no statistically significant variance differences between the groups. The assumption of normality was then tested with the Shapiro&#x02013;Wilk test (<xref ref-type="bibr" rid="B67">Shapiro and Francia, 1972</xref>). Since all computed <italic>p</italic>-values were below the standard 0.05 threshold, the null hypothesis of normality was rejected, confirming that the data did not satisfy the conditions required for parametric statistical tests.</p>
<p>Given the violation of normality, subsequent analyses employed non-parametric methods. Specifically, the Wilcoxon signed-rank test (<xref ref-type="bibr" rid="B75">Woolson, 2005</xref>) was applied to perform pairwise comparisons between the proposed QSAVNS and each of the competing optimization algorithms. The resulting <italic>p</italic>-values, listed in <xref ref-type="table" rid="T10">Table 10</xref>, were all below the conventional significance level of &#x003B1; &#x0003D; 0.05, confirming that QSAVNS achieved statistically significant improvements over all alternative approaches.</p>
<table-wrap position="float" id="T10">
<label>Table 10</label>
<caption><p>Wilcoxon test results comparing the QSAVNS algorithm with alternative optimizers across the three experimental configurations.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>QSAVNS vs. others</bold></th>
<th valign="top" align="center"><bold>VNS</bold></th>
<th valign="top" align="center"><bold>GA</bold></th>
<th valign="top" align="center"><bold>PSO</bold></th>
<th valign="top" align="center"><bold>ABC</bold></th>
<th valign="top" align="center"><bold>BA</bold></th>
<th valign="top" align="center"><bold>SCHO</bold></th>
<th valign="top" align="center"><bold>EHO</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">L1 CNN</td>
<td valign="top" align="center">0.034</td>
<td valign="top" align="center">0.027</td>
<td valign="top" align="center">0.041</td>
<td valign="top" align="center">0.042</td>
<td valign="top" align="center">0.041</td>
<td valign="top" align="center">0.044</td>
<td valign="top" align="center">0.037</td>
</tr>
<tr>
<td valign="top" align="left">L2 XGBoost</td>
<td valign="top" align="center">0.029</td>
<td valign="top" align="center">0.039</td>
<td valign="top" align="center">0.035</td>
<td valign="top" align="center">0.023</td>
<td valign="top" align="center">0.033</td>
<td valign="top" align="center">0.034</td>
<td valign="top" align="center">0.036</td>
</tr>
<tr>
<td valign="top" align="left">L2 LightGBM</td>
<td valign="top" align="center">0.037</td>
<td valign="top" align="center">0.044</td>
<td valign="top" align="center">0.041</td>
<td valign="top" align="center">0.023</td>
<td valign="top" align="center">0.032</td>
<td valign="top" align="center">0.035</td>
<td valign="top" align="center">0.039</td>
</tr></tbody>
</table>
</table-wrap>
<p>These results provide strong empirical evidence that the superior performance of QSAVNS is not due to random fluctuations or sampling bias. Instead, they confirm a consistent and meaningful advantage across all three experimental configurations, underscoring both the robustness and the practical effectiveness of the proposed enhanced optimization method.</p>
</sec>
<sec>
<label>6.3</label>
<title>Best model interpretation</title>
<p>The practical importance of machine learning classifiers extends beyond achieving high predictive accuracy to encompass the interpretability and transparency of their internal decision-making processes. Interpretability provides crucial insights into the mechanisms underlying algorithmic predictions, allowing the detection of hidden biases, the identification of key predictive features, and the refinement of analytical workflows. This transparency is especially valuable for improving data acquisition, feature engineering, and preprocessing procedures, ultimately contributing to the development of more reliable and trustworthy models. In image-based analysis, specifically, understanding which features exert the greatest influence on classification outcomes improves both the explanatory depth and the practical applicability of the model. However, as machine learning architectures, particularly DL systems, grow increasingly complex, achieving interpretability becomes substantially more difficult. The deeper and more intricate the model, the less transparent its internal reasoning tends to be, making it challenging to trace errors, identify sources of bias, or align algorithmic logic with human understanding. This opacity can erode trust in automated systems, particularly in high-stakes domains such as healthcare, where accountability and interpretability are essential.</p>
<p>To address these challenges, the present study employed SHAP (SHapley Additive exPlanations) (<xref ref-type="bibr" rid="B40">Lundberg and Lee, 2017</xref>) within the proposed two-tier classification framework. SHAP offers a unified and theoretically grounded approach to interpreting model predictions by quantifying the contribution of each input feature, thereby clarifying how specific factors influence decision outcomes. In this research, the standard SHAP methodology was applied directly to the output of models optimized with the QSAVNS algorithm, without any algorithmic modifications. This interpretive layer proved critical for identifying the most influential features that affect classification performance, an especially important consideration in clinical contexts, where understanding the rationale behind predictions is as vital as their accuracy. The kernel explainer variant of SHAP was used to examine the proposed multi-level system, effectively isolating the relative contributions of the CNN-based feature extraction stage, the ensemble classifiers, and the metaheuristic optimization process. This approach provided a comprehensive and transparent understanding of how the hybrid framework generates its final predictions.</p>
<p>For the interpretation of CNN-based models, the deep explainer variant of SHAP was used to identify and visualize the most influential features in the convolutional layers, offering a detailed representation of the internal reasoning of the model. The results of the multi-class classification analysis are illustrated in <xref ref-type="fig" rid="F5">Figure 5</xref>, which contrasts interpretations obtained from the deep SHAP explainer with those generated using the kernel-based SHAP method applied to the XGBoost and LightGBM multi-tier frameworks. These comparative visualizations clarify how individual input features contribute across different layers of the models, providing a more comprehensive and transparent understanding of their underlying predictive mechanisms.</p>
<fig position="float" id="F5">
<label>Figure 5</label>
<caption><p>Best-performing QSAVNS-optimized multi-class LightGBM model, showing results for Class 0 (No Dementia), Class 1 (Very Mild), Class 2 (Mild), and Class 3 (Moderate).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fncom-20-1731812-g0005.tif">
<alt-text content-type="machine-generated">Four panels compare brain images with SHAP value visualizations for different dementia stages: No Dementia, Very Mild, Mild, and Moderate. Each panel has original brain scan images and corresponding SHAP images showing top 150 dots, indicating feature importance. Color bars below represent SHAP values from minus one to one, with gradients from blue to red.</alt-text>
</graphic>
</fig>
<p>SHAP visualizations for the four classes of AD reveal distinct contribution patterns that evolve with disease progression. For the No Dementia class, the SHAP value distribution is relatively balanced, encompassing both positive and negative contributions. This balance suggests that the model&#x00027;s decisions for this class rely on a wide and diverse range of features, some reinforcing and others opposing the non-dementia classification, indicating that the decision-making process is based on varied and diffuse characteristics. In contrast, the Very Mild and Mild Dementia classes display more compact SHAP clusters, signifying that a smaller set of features exerts a stronger influence on the predictions. This concentration is in line with clinical expectations, as the early stages of Alzheimer&#x00027;s are marked by subtle, localized structural or functional alterations that serve as emerging differentiation signals. For the Moderate Dementia class, SHAP distributions become markedly polarized, revealing that a limited number of dominant features almost exclusively drive the model&#x00027;s predictions as the disease advances.</p>
<p>Together, these results illustrate a progression-dependent landscape of significance. During the earlier stages of the disease, the predictive reasoning of the model is based on a broad and heterogeneous collection of features, reflecting the inherent diagnostic ambiguity associated with early detection of Alzheimer&#x00027;s. As the condition progresses, the focus of the model narrows to a smaller group of highly discriminative features, consistent with the emergence of more distinct and stable pathological patterns. From a clinical perspective, this evolution mirrors real-world diagnostic challenges: while early-stage Alzheimer&#x00027;s detection depends on the recognition of subtle and diffuse anomalies, advanced stages present more pronounced and easily identifiable biomarkers. Consequently, SHAP-based interpretive analysis not only validates the predictive reliability of the proposed framework but also provides valuable clinical insight into which neuroimaging characteristics have the greatest diagnostic relevance in different phases of the progression of Alzheimer&#x00027;s disease.</p></sec></sec>
<sec sec-type="conclusions" id="s7">
<label>7</label>
<title>Conclusion</title>
<p>Integration of accurate stage-classification models into clinical workflows has significant policy and operational implications for the management of AD. Early and precise stratification of patients across disease stages enables clinicians and healthcare systems to make more informed decisions regarding treatment planning, intensity of care, and allocation of specialized, often scarce, resources. Predictive insights generated by ML models can support the prioritization of critical interventions such as advanced neuroimaging, neuropsychological assessments, or enrollment in clinical trials, particularly in settings with limited diagnostic capacity and funding. For policymakers, these technologies provide the foundation for adaptive care pathways that dynamically align healthcare delivery with patient-specific needs, thus improving both system efficiency and individual patient outcomes.</p>
<p>Beyond direct clinical applications, the deployment of such classification frameworks has broader long-term implications for the design and strategy of healthcare systems. Reliable prediction of AD stages can contribute to the creation of standardized evidence-based protocols for diagnosis, monitoring, and transitions between different levels of care. This reduces clinical variability, improves diagnostic consistency, and promotes equitable access to specialized treatments. In addition, longitudinal datasets produced by AI-enabled diagnostic systems can inform national dementia strategies, guide preventive interventions for at-risk populations, and support the development of reimbursement models that emphasize measurable health outcomes.</p>
<p>However, the realization of these benefits depends on the establishment of comprehensive policy frameworks that address the ethical, legal, and technical challenges associated with the integration of AI in healthcare. Key priorities include enforcing rigorous standards for data privacy and governance, ensuring algorithmic transparency and interpretability, and implementing robust training programs to prepare clinicians, data scientists, and administrators to critically assess and safely use AI-based tools. Only through such safeguards can the integration of intelligent stage-classification systems achieve both clinical reliability and public trust, ultimately fostering a responsible and sustainable application of AI in real-world medical environments.</p>
<p>Accurate classification methods are essential to understand and manage the progression of AD, as they allow for precise staging that directly informs therapeutic strategies, supports continuous monitoring and improves patient quality of life. Because clinical differentiation between early, intermediate, and advanced stages is vital for determining both the timing and intensity of interventions, reliable stratification tools play a key role in guiding treatment selection, prioritizing clinical resources, and informing long-term prognostic decisions. In addition, advanced classification frameworks have the capability to uncover subtle, multidimensional patterns within neuroimaging and clinical datasets that are often undetectable using conventional diagnostic approaches.</p>
<p>To address these challenges, this study proposed a two-tier hybrid framework that integrates CNNs for feature extraction with ensemble learning classifiers, specifically XGBoost and LightGBM, for AD stage prediction. The performance of the model was further enhanced through metaheuristic-driven hyperparameter optimization, utilizing a customized variant of the VNS algorithm specifically adapted for this purpose. The framework was evaluated on publicly available AD datasets in a multi-class classification setting aimed at distinguishing among distinct disease stages. The best-performing configuration, CNN-based feature extraction combined with LightGBM classification optimized through the proposed QSAVNS algorithm, achieved a maximum accuracy of 89.55%, representing a significant improvement in both predictive accuracy and stage identification reliability.</p>
<p>Comprehensive statistical analyses validated the superiority of the proposed approach compared to standard VNS and other widely used metaheuristic optimization algorithms. To enhance interpretability and model transparency, a SHAP analysis was applied to the best-performing configuration. Feature vectors extracted from the CNN&#x00027;s post-dropout layer were entered into the LightGBM classifier, and SHAP values were calculated to quantify the contribution of individual features to the model&#x00027;s predictions, thus elucidating its internal decision-making process.</p>
<p>The proposed methodology introduces several distinct advantages. The tailored QSAVNS optimizer consistently outperformed existing metaheuristic algorithms, while the dual-layer architecture achieved substantially higher classification accuracy than the baseline CNN models without introducing excessive computational complexity. From a clinical point of view, this hybrid model shows great potential for real-world deployment in the diagnosis and management of AD. Accurate stage classification facilitates earlier detection, more targeted treatment planning, and improved prognostic assessment, ultimately contributing to the development of more personalized, effective, and adaptive care strategies for individuals affected by Alzheimer&#x00027;s disease.</p>
<p>Nevertheless, several limitations of this study should be recognized. The comparative evaluation included a relatively narrow set of optimization algorithms and was limited by modest population sizes and iteration counts. Future investigations will seek to address these constraints by broadening the scope of metaheuristic techniques considered and performing larger-scale experimental analyses, depending on the availability of greater computational resources. Such expansions are expected to yield deeper insights and more broadly generalizable findings. Furthermore, the proposed QSAVNS algorithm demonstrates strong potential for adaptation to a wide range of ML tasks that demand sophisticated hyperparameter optimization. Extending this framework to handle real-time or streaming neuroimaging data also represents a promising avenue to advance clinical decision-support systems that aim to improve the diagnosis and treatment of AD.</p></sec>
</body>
<back>
<sec sec-type="data-availability" id="s8">
<title>Data availability statement</title>
<p>Publicly available datasets were analyzed in this study. This data can be found here: <ext-link ext-link-type="uri" xlink:href="https://www.kaggle.com/datasets/aryansinghal10/alzheimers-multiclass-dataset-equal-and-augmented">https://www.kaggle.com/datasets/aryansinghal10/alzheimers-multiclass-dataset-equal-and-augmented</ext-link>.</p>
</sec>
<sec sec-type="author-contributions" id="s9">
<title>Author contributions</title>
<p>LA: Visualization, Writing &#x02013; review &#x00026; editing. SA: Funding acquisition, Resources, Supervision, Validation, Writing &#x02013; original draft. MM: Software, Supervision, Validation, Writing &#x02013; review &#x00026; editing. DB: Formal analysis, Funding acquisition, Investigation, Writing &#x02013; review &#x00026; editing. MZ: Project administration, Supervision, Writing &#x02013; review &#x00026; editing. TZ: Formal analysis, Investigation, Writing &#x02013; review &#x00026; editing. MA: Data curation, Formal analysis, Methodology, Project administration, Visualization, Writing &#x02013; review &#x00026; editing. NB: Funding acquisition, Investigation, Methodology, Project administration, Writing &#x02013; original draft.</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
<p>The authors MZ, NB and MA declared that they were editorial board members of Frontiers at the time of submission. This had no impact on the peer review process and the final decision.</p>
</sec>
<sec sec-type="ai-statement" id="s11">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was used in the creation of this manuscript. Artificial intelligence-based tools were used in this paper solely to assist with stylistic and grammatical refinement, including paraphrasing for clarity, correcting language errors, and improving sentence flow. All scientific content, analyses, interpretations, and conclusions are entirely the authors&#x00027; original work.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec sec-type="disclaimer" id="s12">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Abualigah</surname> <given-names>L.</given-names></name> <name><surname>Elaziz</surname> <given-names>M. A.</given-names></name> <name><surname>Sumari</surname> <given-names>P.</given-names></name> <name><surname>Geem</surname> <given-names>Z. W.</given-names></name> <name><surname>Gandomi</surname> <given-names>A. H.</given-names></name></person-group> (<year>2022</year>). <article-title>Reptile search algorithm (RSA): a nature-inspired meta-heuristic optimizer</article-title>. <source>Expert Syst. Appl</source>. <volume>191</volume>:<fpage>116158</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.eswa.2021.116158</pub-id></mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Al-Betar</surname> <given-names>M. A.</given-names></name> <name><surname>Awadallah</surname> <given-names>M. A.</given-names></name> <name><surname>Braik</surname> <given-names>M. S.</given-names></name> <name><surname>Makhadmeh</surname> <given-names>S.</given-names></name> <name><surname>Doush</surname> <given-names>I. A.</given-names></name></person-group> (<year>2024</year>). <article-title>Elk herd optimizer: a novel nature-inspired metaheuristic algorithm</article-title>. <source>Artif. Intell. Rev</source>. <volume>57</volume>:<fpage>48</fpage>. doi: <pub-id pub-id-type="doi">10.1007/s10462-023-10680-4</pub-id></mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Antonijevic</surname> <given-names>M.</given-names></name> <name><surname>Jovanovic</surname> <given-names>L.</given-names></name> <name><surname>Bacanin</surname> <given-names>N.</given-names></name> <name><surname>Zivkovic</surname> <given-names>M.</given-names></name> <name><surname>Kaljevic</surname> <given-names>J.</given-names></name> <name><surname>Zivkovic</surname> <given-names>T.</given-names></name></person-group> (<year>2024</year>). <article-title>&#x0201C;Using bert with modified metaheuristic optimized xgboost for phishing email identification,&#x0201D;</article-title> in <source>International Conference on Artificial Intelligence and Smart Energy</source> (<publisher-loc>Springer</publisher-loc>), <fpage>358</fpage>&#x02013;<lpage>370</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-031-61475-0_28</pub-id></mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Antonijevic</surname> <given-names>M.</given-names></name> <name><surname>Zivkovic</surname> <given-names>M.</given-names></name> <name><surname>Djuric Jovicic</surname> <given-names>M.</given-names></name> <name><surname>Nikolic</surname> <given-names>B.</given-names></name> <name><surname>Perisic</surname> <given-names>J.</given-names></name> <name><surname>Milovanovic</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Intrusion detection in metaverse environment internet of things systems by metaheuristics tuned two level framework</article-title>. <source>Sci. Rep</source>. <volume>15</volume>:<fpage>3555</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-025-88135-9</pub-id><pub-id pub-id-type="pmid">39875592</pub-id></mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Arya</surname> <given-names>A. D.</given-names></name> <name><surname>Verma</surname> <given-names>S. S.</given-names></name> <name><surname>Chakarabarti</surname> <given-names>P.</given-names></name> <name><surname>Chakrabarti</surname> <given-names>T.</given-names></name> <name><surname>Elngar</surname> <given-names>A. A.</given-names></name> <name><surname>Kamali</surname> <given-names>A.-M.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>A systematic review on machine learning and deep learning techniques in the effective diagnosis of Alzheimer&#x00027;s disease</article-title>. <source>Brain Inform</source>. <volume>10</volume>:<fpage>17</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s40708-023-00195-7</pub-id><pub-id pub-id-type="pmid">37450224</pub-id></mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Babiloni</surname> <given-names>C.</given-names></name> <name><surname>Blinowska</surname> <given-names>K.</given-names></name> <name><surname>Bonanni</surname> <given-names>L.</given-names></name> <name><surname>Cichocki</surname> <given-names>A.</given-names></name> <name><surname>De Haan</surname> <given-names>W.</given-names></name> <name><surname>Del Percio</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>What electrophysiology tells us about Alzheimer&#x00027;s disease: a window into the synchronization and connectivity of brain neurons</article-title>. <source>Neurobiol. Aging</source>. <volume>85</volume>, <fpage>58</fpage>&#x02013;<lpage>73</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neurobiolaging.2019.09.008</pub-id><pub-id pub-id-type="pmid">31739167</pub-id></mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bacanin</surname> <given-names>N.</given-names></name> <name><surname>Jovanovic</surname> <given-names>L.</given-names></name> <name><surname>Djordjevic</surname> <given-names>M.</given-names></name> <name><surname>Petrovic</surname> <given-names>A.</given-names></name> <name><surname>Zivkovic</surname> <given-names>T.</given-names></name> <name><surname>Zivkovic</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>&#x0201C;Crop yield forecasting based on echo state network tuned by crayfish optimization algorithm,&#x0201D;</article-title> in <source>2024 IEEE International Conference on Contemporary Computing and Communications (InC4)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>1</fpage>&#x02013;<lpage>6</lpage>. doi: <pub-id pub-id-type="doi">10.1109/InC460750.2024.10649266</pub-id></mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bai</surname> <given-names>J.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Zheng</surname> <given-names>M.</given-names></name> <name><surname>Khatir</surname> <given-names>S.</given-names></name> <name><surname>Benaissa</surname> <given-names>B.</given-names></name> <name><surname>Abualigah</surname> <given-names>L.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>A sinh cosh optimizer</article-title>. <source>Knowl.-Based Syst</source>. <volume>282</volume>:<fpage>111081</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.knosys.2023.111081</pub-id></mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bhatt</surname> <given-names>D.</given-names></name> <name><surname>Patel</surname> <given-names>C.</given-names></name> <name><surname>Talsania</surname> <given-names>H.</given-names></name> <name><surname>Patel</surname> <given-names>J.</given-names></name> <name><surname>Vaghela</surname> <given-names>R.</given-names></name> <name><surname>Pandya</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Cnn variants for computer vision: history, architecture, application, challenges and future scope</article-title>. <source>Electronics</source> <volume>10</volume>:<fpage>2470</fpage>. doi: <pub-id pub-id-type="doi">10.3390/electronics10202470</pub-id></mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bloch</surname> <given-names>L.</given-names></name> <name><surname>Friedrich</surname> <given-names>C. M.</given-names></name></person-group> <collab>Alzheimer&#x00027;s Disease Neuroimaging Initiative</collab> (<year>2024</year>). <article-title>Systematic comparison of 3d deep learning and classical machine learning explanations for Alzheimer&#x00027;s disease detection</article-title>. <source>Comput. Biol. Med</source>. <volume>170</volume>:<fpage>108029</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compbiomed.2024.108029</pub-id><pub-id pub-id-type="pmid">38308870</pub-id></mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bozovic</surname> <given-names>A.</given-names></name> <name><surname>Jovanovic</surname> <given-names>L.</given-names></name> <name><surname>Dobrojevic</surname> <given-names>M.</given-names></name> <name><surname>Antonijevic</surname> <given-names>M.</given-names></name> <name><surname>Bacanin</surname> <given-names>N.</given-names></name> <name><surname>Desnica</surname> <given-names>E.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Exploring the applicability of decision trees and deep neural networks optimized by metaheuristics for predictive maintenance in milling</article-title>. <source>J. Supercomput</source>. <volume>81</volume>:<fpage>1601</fpage>. doi: <pub-id pub-id-type="doi">10.1007/s11227-025-08082-0</pub-id></mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Breiman</surname> <given-names>L.</given-names></name></person-group> (<year>2001</year>). <article-title>Random forests</article-title>. <source>Mach. Learn</source>. <volume>45</volume>, <fpage>5</fpage>&#x02013;<lpage>32</lpage>. doi: <pub-id pub-id-type="doi">10.1023/A:1010933404324</pub-id></mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Budiman</surname> <given-names>A.</given-names></name> <name><surname>Yaputera</surname> <given-names>R. A.</given-names></name> <name><surname>Achmad</surname> <given-names>S.</given-names></name> <name><surname>Kurniawan</surname> <given-names>A.</given-names></name></person-group> (<year>2023</year>). <article-title>Student attendance with face recognition (LBPH or CNN): systematic literature review</article-title>. <source>Procedia Comput. Sci</source>. <volume>216</volume>, <fpage>31</fpage>&#x02013;<lpage>38</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.procs.2022.12.108</pub-id></mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>T.</given-names></name> <name><surname>Guestrin</surname> <given-names>C.</given-names></name></person-group> (<year>2016</year>). <article-title>&#x0201C;Xgboost: a scalable tree boosting system,&#x0201D;</article-title> in <source>Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining</source>, <fpage>785</fpage>&#x02013;<lpage>794</lpage>. doi: <pub-id pub-id-type="doi">10.1145/2939672.2939785</pub-id></mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>Y.</given-names></name> <name><surname>Qian</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Su</surname> <given-names>W.</given-names></name> <name><surname>Huang</surname> <given-names>Y.</given-names></name> <name><surname>Wang</surname> <given-names>X.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Prediction models for conversion from mild cognitive impairment to Alzheimer&#x00027;s disease: a systematic review and meta-analysis</article-title>. <source>Front. Aging Neurosci</source>. <volume>14</volume>:<fpage>840386</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnagi.2022.840386</pub-id><pub-id pub-id-type="pmid">35493941</pub-id></mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Costanzo</surname> <given-names>M.</given-names></name> <name><surname>Cutrona</surname> <given-names>C.</given-names></name> <name><surname>Leodori</surname> <given-names>G.</given-names></name> <name><surname>Malimpensa</surname> <given-names>L.</given-names></name> <name><surname>D&#x00027;antonio</surname> <given-names>F.</given-names></name> <name><surname>Conte</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Exploring easily accessible neurophysiological biomarkers for predicting Alzheimer&#x00027;s disease progression: a systematic review</article-title>. <source>Alzheimer&#x00027;s Res. Ther</source>. <volume>16</volume>:<fpage>244</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s13195-024-01607-4</pub-id><pub-id pub-id-type="pmid">39497149</pub-id></mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>de Ville</surname> <given-names>B.</given-names></name></person-group> (<year>2013</year>). <article-title>Decision trees</article-title>. <source>WIREs Comput. Stat</source>. <volume>5</volume>, <fpage>448</fpage>&#x02013;<lpage>455</lpage>. doi: <pub-id pub-id-type="doi">10.1002/wics.1278</pub-id></mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Degadwala</surname> <given-names>S.</given-names></name> <name><surname>Vyas</surname> <given-names>D.</given-names></name> <name><surname>Jadeja</surname> <given-names>A.</given-names></name> <name><surname>Pandya</surname> <given-names>D. D.</given-names></name></person-group> (<year>2023</year>). <article-title>&#x0201C;Enhancing Alzheimer stage classification of MRI images through transfer learning,&#x0201D;</article-title> in <source>2023 5th International Conference on Inventive Research in Computing Applications (ICIRCA)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>733</fpage>&#x02013;<lpage>737</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICIRCA57980.2023.10220651</pub-id></mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dobrojevic</surname> <given-names>M.</given-names></name> <name><surname>Jovanovic</surname> <given-names>L.</given-names></name> <name><surname>Babic</surname> <given-names>L.</given-names></name> <name><surname>Cajic</surname> <given-names>M.</given-names></name> <name><surname>Zivkovic</surname> <given-names>T.</given-names></name> <name><surname>Zivkovic</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Cyberbullying sexism harassment identification by metaheurustics-tuned extreme gradient boosting</article-title>. <source>Comput. Mater. Cont</source>. <volume>80</volume>:<fpage>4997</fpage>. doi: <pub-id pub-id-type="doi">10.32604/cmc.2024.054459</pub-id></mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>El-Assy</surname> <given-names>A.</given-names></name> <name><surname>Amer</surname> <given-names>H. M.</given-names></name> <name><surname>Ibrahim</surname> <given-names>H.</given-names></name> <name><surname>Mohamed</surname> <given-names>M.</given-names></name></person-group> (<year>2024</year>). <article-title>A novel CNN architecture for accurate early detection and classification of Alzheimer&#x00027;s disease using MRI data</article-title>. <source>Sci. Rep</source>. <volume>14</volume>:<fpage>3463</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-024-53733-6</pub-id><pub-id pub-id-type="pmid">38342924</pub-id></mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>El-Sappagh</surname> <given-names>S.</given-names></name> <name><surname>Saleh</surname> <given-names>H.</given-names></name> <name><surname>Ali</surname> <given-names>F.</given-names></name> <name><surname>Amer</surname> <given-names>E.</given-names></name> <name><surname>Abuhmed</surname> <given-names>T.</given-names></name></person-group> (<year>2022</year>). <article-title>Two-stage deep learning model for Alzheimer&#x00027;s disease detection and prediction of the mild cognitive impairment time</article-title>. <source>Neural Comput. Applic</source>. <volume>34</volume>, <fpage>14487</fpage>&#x02013;<lpage>14509</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00521-022-07263-9</pub-id></mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Givian</surname> <given-names>H.</given-names></name> <name><surname>Calbimonte</surname> <given-names>J.-P.</given-names></name></person-group> <collab>for the Alzheimer&#x00027;s Disease Neuroimaging Initiative</collab> (<year>2024</year>). <article-title>Early diagnosis of Alzheimer&#x00027;s disease and mild cognitive impairment using MRI analysis and machine learning algorithms</article-title>. <source>Disc. Appl. Sci</source>. <volume>7</volume>:<fpage>27</fpage>. doi: <pub-id pub-id-type="doi">10.1007/s42452-024-06440-w</pub-id><pub-id pub-id-type="pmid">39712291</pub-id></mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gu</surname> <given-names>J.</given-names></name> <name><surname>Wang</surname> <given-names>Z.</given-names></name> <name><surname>Kuen</surname> <given-names>J.</given-names></name> <name><surname>Ma</surname> <given-names>L.</given-names></name> <name><surname>Shahroudy</surname> <given-names>A.</given-names></name> <name><surname>Shuai</surname> <given-names>B.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Recent advances in convolutional neural networks</article-title>. <source>Pattern Recognit</source>. <volume>77</volume>, <fpage>354</fpage>&#x02013;<lpage>377</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.patcog.2017.10.013</pub-id></mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Guenette</surname> <given-names>J. P.</given-names></name> <name><surname>Stern</surname> <given-names>R. A.</given-names></name> <name><surname>Tripodis</surname> <given-names>Y.</given-names></name> <name><surname>Chua</surname> <given-names>A. S.</given-names></name> <name><surname>Schultz</surname> <given-names>V.</given-names></name> <name><surname>Sydnor</surname> <given-names>V. J.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Automated versus manual segmentation of brain region volumes in former football players</article-title>. <source>Neuroimage</source> <volume>18</volume>, <fpage>888</fpage>&#x02013;<lpage>896</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.nicl.2018.03.026</pub-id><pub-id pub-id-type="pmid">29876273</pub-id></mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gurrola-Ramos</surname> <given-names>J.</given-names></name> <name><surname>Hern&#x000E0;ndez-Aguirre</surname> <given-names>A.</given-names></name> <name><surname>Dalmau-Cede&#x000F1;o</surname> <given-names>O.</given-names></name></person-group> (<year>2020</year>). <article-title>&#x0201C;Colshade for real-world single-objective constrained optimization problems,&#x0201D;</article-title> in <source>2020 IEEE Congress on Evolutionary Computation (CEC)</source>, <fpage>1</fpage>&#x02013;<lpage>8</lpage>. doi: <pub-id pub-id-type="doi">10.1109/CEC48606.2020.9185583</pub-id></mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hasib</surname> <given-names>K. M.</given-names></name> <name><surname>Azam</surname> <given-names>S.</given-names></name> <name><surname>Karim</surname> <given-names>A.</given-names></name> <name><surname>Al Marouf</surname> <given-names>A.</given-names></name> <name><surname>Shamrat</surname> <given-names>F. J. M.</given-names></name> <name><surname>Montaha</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>MCNN-LSTM: combining CNN and LSTM to classify multi-class text in imbalanced news data</article-title>. <source>IEEE Access</source> <volume>11</volume>, <fpage>93048</fpage>&#x02013;<lpage>93063</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2023.3309697</pub-id></mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hastie</surname> <given-names>T.</given-names></name> <name><surname>Rosset</surname> <given-names>S.</given-names></name> <name><surname>Zhu</surname> <given-names>J.</given-names></name> <name><surname>Zou</surname> <given-names>H.</given-names></name></person-group> (<year>2009</year>). <article-title>Multi-class adaboost</article-title>. <source>Stat. Interface</source> <volume>2</volume>, <fpage>349</fpage>&#x02013;<lpage>360</lpage>. doi: <pub-id pub-id-type="doi">10.4310/SII.2009.v2.n3.a8</pub-id></mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Helaly</surname> <given-names>H. A.</given-names></name> <name><surname>Badawy</surname> <given-names>M.</given-names></name> <name><surname>Haikal</surname> <given-names>A. Y.</given-names></name></person-group> (<year>2022</year>). <article-title>Deep learning approach for early detection of Alzheimer&#x00027;s disease</article-title>. <source>Cognit. Comput</source>. <volume>14</volume>, <fpage>1711</fpage>&#x02013;<lpage>1727</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s12559-021-09946-2</pub-id><pub-id pub-id-type="pmid">34745371</pub-id></mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jovanovic</surname> <given-names>L.</given-names></name> <name><surname>Jovanovic</surname> <given-names>D.</given-names></name> <name><surname>Antonijevic</surname> <given-names>M.</given-names></name> <name><surname>Nikolic</surname> <given-names>B.</given-names></name> <name><surname>Bacanin</surname> <given-names>N.</given-names></name> <name><surname>Zivkovic</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Improving phishing website detection using a hybrid two-level framework for feature selection and xgboost tuning</article-title>. <source>J. Web Eng</source>. <volume>22</volume>, <fpage>543</fpage>&#x02013;<lpage>574</lpage>. doi: <pub-id pub-id-type="doi">10.13052/jwe1540-9589.2237</pub-id></mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Karaboga</surname> <given-names>D.</given-names></name> <name><surname>Basturk</surname> <given-names>B.</given-names></name></person-group> (<year>2007</year>). <article-title>A powerful and efficient algorithm for numerical function optimization: artificial bee colony (ABC) algorithm</article-title>. <source>J. Global Optim</source>. <volume>39</volume>, <fpage>459</fpage>&#x02013;<lpage>471</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10898-007-9149-x</pub-id></mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kareem</surname> <given-names>S.</given-names></name> <name><surname>Hamad</surname> <given-names>Z. J.</given-names></name> <name><surname>Askar</surname> <given-names>S.</given-names></name></person-group> (<year>2021</year>). <article-title>An evaluation of CNN and ANN in prediction weather forecasting: a review</article-title>. <source>Sustain. Eng. Innov</source>. <volume>3</volume>:<fpage>148</fpage>. doi: <pub-id pub-id-type="doi">10.37868/sei.v3i2.id146</pub-id></mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kaushik</surname> <given-names>A.</given-names></name> <name><surname>Singh</surname> <given-names>J.</given-names></name> <name><surname>Mahajan</surname> <given-names>S.</given-names></name></person-group> (<year>2024</year>). <article-title>Computational study of the progression of Alzheimer&#x00027;s disease and changes in hippocampal theta rhythm activities due to beta-amyloid altered calcium dependent ionic channels</article-title>. <source>Int. J. Med. Eng. Inform</source>. <volume>16</volume>, <fpage>71</fpage>&#x02013;<lpage>81</lpage>. doi: <pub-id pub-id-type="doi">10.1504/IJMEI.2024.135686</pub-id></mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ke</surname> <given-names>G.</given-names></name> <name><surname>Meng</surname> <given-names>Q.</given-names></name> <name><surname>Finley</surname> <given-names>T.</given-names></name> <name><surname>Wang</surname> <given-names>T.</given-names></name> <name><surname>Chen</surname> <given-names>W.</given-names></name> <name><surname>Ma</surname> <given-names>W.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>&#x0201C;Lightgbm: a highly efficient gradient boosting decision tree,&#x0201D;</article-title> in <source>Advances in Neural Information Processing Systems</source>, 30.</mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kennedy</surname> <given-names>J.</given-names></name> <name><surname>Eberhart</surname> <given-names>R.</given-names></name></person-group> (<year>1995</year>). <article-title>&#x0201C;Particle swarm optimization,&#x0201D;</article-title> in <source>Proceedings of ICNN&#x00027;95 - International Conference on Neural Networks</source>, <fpage>1942</fpage>&#x02013;<lpage>1948</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICNN.1995.488968</pub-id></mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Kramer</surname> <given-names>O.</given-names></name></person-group> (<year>2013</year>). <source>K-Nearest Neighbors</source>. <publisher-loc>Berlin, Heidelberg</publisher-loc>: <publisher-name>Springer Berlin Heidelberg</publisher-name>.</mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Lakicevic</surname> <given-names>B.</given-names></name> <name><surname>Spalevic</surname> <given-names>Z.</given-names></name> <name><surname>Volas</surname> <given-names>I.</given-names></name> <name><surname>Jovanovic</surname> <given-names>L.</given-names></name> <name><surname>Zivkovic</surname> <given-names>M.</given-names></name> <name><surname>Zivkovic</surname> <given-names>T.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>&#x0201C;Artificial neural networks with soft attention: natural language processing for phishing email detection optimized with modified metaheuristics,&#x0201D;</article-title> in <source>International Conference on Advanced Network Technologies and Intelligent Computing</source> (<publisher-loc>Springer</publisher-loc>), <fpage>421</fpage>&#x02013;<lpage>438</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-031-83790-6_27</pub-id></mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lao</surname> <given-names>Z.</given-names></name> <name><surname>He</surname> <given-names>D.</given-names></name> <name><surname>Wei</surname> <given-names>Z.</given-names></name> <name><surname>Shang</surname> <given-names>H.</given-names></name> <name><surname>Jin</surname> <given-names>Z.</given-names></name> <name><surname>Miao</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Intelligent fault diagnosis for rail transit switch machine based on adaptive feature selection and improved lightgbm</article-title>. <source>Eng. Fail. Anal</source>. <volume>148</volume>:<fpage>107219</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.engfailanal.2023.107219</pub-id></mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>LaTorre</surname> <given-names>A.</given-names></name> <name><surname>Molina</surname> <given-names>D.</given-names></name> <name><surname>Osaba</surname> <given-names>E.</given-names></name> <name><surname>Poyatos</surname> <given-names>J.</given-names></name> <name><surname>Del Ser</surname> <given-names>J.</given-names></name> <name><surname>Herrera</surname> <given-names>F.</given-names></name></person-group> (<year>2021</year>). <article-title>A prescription of methodological guidelines for comparing bio-inspired optimization algorithms</article-title>. <source>Swarm Evolut. Comput</source>. <volume>67</volume>:<fpage>100973</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.swevo.2021.100973</pub-id></mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Liu</surname> <given-names>Z.</given-names></name> <name><surname>Shen</surname> <given-names>J.</given-names></name> <name><surname>Wang</surname> <given-names>F.</given-names></name> <name><surname>Qi</surname> <given-names>W.</given-names></name> <name><surname>Jeon</surname> <given-names>S.</given-names></name></person-group> (<year>2023</year>). <article-title>A lightgbm-based strategy to predict tunnel rockmass class from TBM construction data for building control</article-title>. <source>Adv. Eng. Inform</source>. <volume>58</volume>:<fpage>102130</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.aei.2023.102130</pub-id></mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Lundberg</surname> <given-names>S. M.</given-names></name> <name><surname>Lee</surname> <given-names>S.-I.</given-names></name></person-group> (<year>2017</year>). <article-title>&#x0201C;A unified approach to interpreting model predictions,&#x0201D;</article-title> in <source>Proceedings of the 31st International Conference on Neural Information Processing Systems, NIPS&#x00027;17</source> (<publisher-loc>Red Hook, NY, USA</publisher-loc>: <publisher-name>Curran Associates Inc.</publisher-name>), <fpage>4768</fpage>&#x02013;<lpage>4777</lpage>.</mixed-citation>
</ref>
<ref id="B41">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Luo</surname> <given-names>W.</given-names></name> <name><surname>Lin</surname> <given-names>X.</given-names></name> <name><surname>Li</surname> <given-names>C.</given-names></name> <name><surname>Yang</surname> <given-names>S.</given-names></name> <name><surname>Shi</surname> <given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>Benchmark functions for CEC 2022 competition on seeking multiple optima in dynamic environments</article-title>. <source>arXiv preprint arXiv:2201.00523</source>.</mixed-citation>
</ref>
<ref id="B42">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Mahanty</surname> <given-names>C.</given-names></name> <name><surname>Patro</surname> <given-names>S. G. K.</given-names></name> <name><surname>Dannana</surname> <given-names>P.</given-names></name></person-group> (<year>2024a</year>). <article-title>&#x0201C;Alzheimer&#x00027;s disease detection using an ensemble of transfer learning models,&#x0201D;</article-title> in <source>2024 OITS International Conference on Information Technology (OCIT)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>261</fpage>&#x02013;<lpage>266</lpage>. doi: <pub-id pub-id-type="doi">10.1109/OCIT65031.2024.00053</pub-id></mixed-citation>
</ref>
<ref id="B43">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mahanty</surname> <given-names>C.</given-names></name> <name><surname>Rajesh</surname> <given-names>T.</given-names></name> <name><surname>Govil</surname> <given-names>N.</given-names></name> <name><surname>Venkateswarulu</surname> <given-names>N.</given-names></name> <name><surname>Kumar</surname> <given-names>S.</given-names></name> <name><surname>Lasisi</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2024b</year>). <article-title>Effective Alzheimer&#x00027;s disease detection using enhanced xception blending with snapshot ensemble</article-title>. <source>Sci. Rep</source>. <volume>14</volume>:<fpage>29263</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-024-80548-2</pub-id><pub-id pub-id-type="pmid">39587224</pub-id></mixed-citation>
</ref>
<ref id="B44">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Matthews</surname> <given-names>B. W.</given-names></name></person-group> (<year>1975</year>). <article-title>Comparison of the predicted and observed secondary structure of T4 phage lysozyme</article-title>. <source>Biochim. Biophys. Acta</source> <volume>405</volume>, <fpage>442</fpage>&#x02013;<lpage>451</lpage>. doi: <pub-id pub-id-type="doi">10.1016/0005-2795(75)90109-9</pub-id><pub-id pub-id-type="pmid">1180967</pub-id></mixed-citation>
</ref>
<ref id="B45">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Menagadevi</surname> <given-names>M.</given-names></name> <name><surname>Devaraj</surname> <given-names>S.</given-names></name> <name><surname>Madian</surname> <given-names>N.</given-names></name> <name><surname>Thiyagarajan</surname> <given-names>D.</given-names></name></person-group> (<year>2024</year>). <article-title>Machine and deep learning approaches for Alzheimer disease detection using magnetic resonance images: an updated review</article-title>. <source>Measurement</source> <volume>226</volume>:<fpage>114100</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.measurement.2023.114100</pub-id></mixed-citation>
</ref>
<ref id="B46">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mirjalili</surname> <given-names>S.</given-names></name></person-group> (<year>2016</year>). <article-title>SCA: a sine cosine algorithm for solving optimization problems</article-title>. <source>Knowl. Based Syst</source>. <volume>96</volume>, <fpage>120</fpage>&#x02013;<lpage>133</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.knosys.2015.12.022</pub-id></mixed-citation>
</ref>
<ref id="B47">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Mirjalili</surname> <given-names>S.</given-names></name></person-group> (<year>2019</year>). <source>Genetic Algorithm</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name>.</mixed-citation>
</ref>
<ref id="B48">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mirjalili</surname> <given-names>S.</given-names></name> <name><surname>Lewis</surname> <given-names>A.</given-names></name></person-group> (<year>2016</year>). <article-title>The whale optimization algorithm</article-title>. <source>Adv. Eng. Softw</source>. <volume>95</volume>, <fpage>51</fpage>&#x02013;<lpage>67</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.advengsoft.2016.01.008</pub-id></mixed-citation>
</ref>
<ref id="B49">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mladenovi&#x00107;</surname> <given-names>N.</given-names></name> <name><surname>Hansen</surname> <given-names>P.</given-names></name></person-group> (<year>1997</year>). <article-title>Variable neighborhood search</article-title>. <source>Comput. Oper. Res</source>. <volume>24</volume>, <fpage>1097</fpage>&#x02013;<lpage>1100</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0305-0548(97)00031-2</pub-id></mixed-citation>
</ref>
<ref id="B50">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Nair</surname> <given-names>V.</given-names></name> <name><surname>Hinton</surname> <given-names>G. E.</given-names></name></person-group> (<year>2010</year>). <article-title>&#x0201C;Rectified linear units improve restricted Boltzmann machines,&#x0201D;</article-title> in <source>Proceedings of the 27th International Conference on Machine Learning (ICML-10)</source>, <fpage>807</fpage>&#x02013;<lpage>814</lpage>.</mixed-citation>
</ref>
<ref id="B51">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nawaz</surname> <given-names>H.</given-names></name> <name><surname>Maqsood</surname> <given-names>M.</given-names></name> <name><surname>Afzal</surname> <given-names>S.</given-names></name> <name><surname>Aadil</surname> <given-names>F.</given-names></name> <name><surname>Mehmood</surname> <given-names>I.</given-names></name> <name><surname>Rho</surname> <given-names>S.</given-names></name></person-group> (<year>2021</year>). <article-title>A deep feature-based real-time system for Alzheimer disease stage detection</article-title>. <source>Multimed. Tools Appl</source>. <volume>80</volume>, <fpage>35789</fpage>&#x02013;<lpage>35807</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11042-020-09087-y</pub-id></mixed-citation>
</ref>
<ref id="B52">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nguyen</surname> <given-names>D.</given-names></name> <name><surname>Nguyen</surname> <given-names>H.</given-names></name> <name><surname>Ong</surname> <given-names>H.</given-names></name> <name><surname>Le</surname> <given-names>H.</given-names></name> <name><surname>Ha</surname> <given-names>H.</given-names></name> <name><surname>Duc</surname> <given-names>N. T.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Ensemble learning using traditional machine learning and deep neural network for diagnosis of Alzheimer&#x00027;s disease</article-title>. <source>IBRO Neurosci. Rep</source>. <volume>13</volume>, <fpage>255</fpage>&#x02013;<lpage>263</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ibneur.2022.08.010</pub-id><pub-id pub-id-type="pmid">36590098</pub-id></mixed-citation>
</ref>
<ref id="B53">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Petrovic</surname> <given-names>A.</given-names></name> <name><surname>Jovanovic</surname> <given-names>L.</given-names></name> <name><surname>Bacanin</surname> <given-names>N.</given-names></name> <name><surname>Antonijevic</surname> <given-names>M.</given-names></name> <name><surname>Savanovic</surname> <given-names>N.</given-names></name> <name><surname>Zivkovic</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Exploring metaheuristic optimized machine learning for software defect detection on natural language and classical datasets</article-title>. <source>Mathematics</source> <volume>12</volume>:<fpage>2918</fpage>. doi: <pub-id pub-id-type="doi">10.3390/math12182918</pub-id></mixed-citation>
</ref>
<ref id="B54">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Petrovic</surname> <given-names>A.</given-names></name> <name><surname>Stoean</surname> <given-names>C.</given-names></name> <name><surname>Stoean</surname> <given-names>R.</given-names></name> <name><surname>Jovanovic</surname> <given-names>L.</given-names></name> <name><surname>Bacanin</surname> <given-names>N.</given-names></name> <name><surname>Simic</surname> <given-names>V.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Evaluation performance of metaheuristics-tuned convolutional neural networks for direct current motor using mel spectrograms</article-title>. <source>Arabian J. Sci. Eng</source>. <volume>2025</volume>, <fpage>1</fpage>&#x02013;<lpage>24</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s13369-025-10950-z</pub-id></mixed-citation>
</ref>
<ref id="B55">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Po&#x00142;ap</surname> <given-names>D.</given-names></name> <name><surname>Wo&#x0017A;niak</surname> <given-names>M.</given-names></name></person-group> (<year>2021</year>). <article-title>Red fox optimization algorithm</article-title>. <source>Expert Syst. Appl</source>. <volume>166</volume>:<fpage>114107</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.eswa.2020.114107</pub-id></mixed-citation>
</ref>
<ref id="B56">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Prasath</surname> <given-names>T.</given-names></name> <name><surname>Sumathi</surname> <given-names>V.</given-names></name></person-group> (<year>2023</year>). <article-title>Identification of Alzheimer&#x00027;s disease by imaging: a comprehensive review</article-title>. <source>Int. J. Environ. Res. Public Health</source> <volume>20</volume>:<fpage>1273</fpage>. doi: <pub-id pub-id-type="doi">10.3390/ijerph20021273</pub-id><pub-id pub-id-type="pmid">36674027</pub-id></mixed-citation>
</ref>
<ref id="B57">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Prokhorenkova</surname> <given-names>L.</given-names></name> <name><surname>Gusev</surname> <given-names>G.</given-names></name> <name><surname>Vorobev</surname> <given-names>A.</given-names></name> <name><surname>Dorogush</surname> <given-names>A. V.</given-names></name> <name><surname>Gulin</surname> <given-names>A.</given-names></name></person-group> (<year>2018</year>). <article-title>&#x0201C;Catboost: unbiased boosting with categorical features,&#x0201D;</article-title> in <source>Advances in Neural Information Processing Systems</source>, <volume>31</volume>.</mixed-citation>
</ref>
<ref id="B58">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Purkovic</surname> <given-names>S.</given-names></name> <name><surname>Jovanovic</surname> <given-names>L.</given-names></name> <name><surname>Zivkovic</surname> <given-names>M.</given-names></name> <name><surname>Antonijevic</surname> <given-names>M.</given-names></name> <name><surname>Dolicanin</surname> <given-names>E.</given-names></name> <name><surname>Tuba</surname> <given-names>E.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Audio analysis with convolutional neural networks and boosting algorithms tuned by metaheuristics for respiratory condition classification</article-title>. <source>J. King Saud Univ. Comput. Inf. Sci</source>. <volume>36</volume>:<fpage>102261</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jksuci.2024.102261</pub-id></mixed-citation>
</ref>
<ref id="B59">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Rahnamayan</surname> <given-names>S.</given-names></name> <name><surname>Tizhoosh</surname> <given-names>H. R.</given-names></name> <name><surname>Salama</surname> <given-names>M. M.</given-names></name></person-group> (<year>2007</year>). <article-title>&#x0201C;Quasi-oppositional differential evolution,&#x0201D;</article-title> in <source>2007 IEEE Congress on Evolutionary Computation</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>2229</fpage>&#x02013;<lpage>2236</lpage>. doi: <pub-id pub-id-type="doi">10.1109/CEC.2007.4424748</pub-id></mixed-citation>
</ref>
<ref id="B60">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rajan</surname> <given-names>K. B.</given-names></name> <name><surname>Weuve</surname> <given-names>J.</given-names></name> <name><surname>Barnes</surname> <given-names>L. L.</given-names></name> <name><surname>McAninch</surname> <given-names>E. A.</given-names></name> <name><surname>Wilson</surname> <given-names>R. S.</given-names></name> <name><surname>Evans</surname> <given-names>D. A.</given-names></name></person-group> (<year>2021</year>). <article-title>Population estimate of people with clinical Alzheimer&#x00027;s disease and mild cognitive impairment in the United States (2020&#x02013;2060)</article-title>. <source>Alzheimer&#x00027;s Dement</source>. <volume>17</volume>, <fpage>1966</fpage>&#x02013;<lpage>1975</lpage>. doi: <pub-id pub-id-type="doi">10.1002/alz.12362</pub-id><pub-id pub-id-type="pmid">34043283</pub-id></mixed-citation>
</ref>
<ref id="B61">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Raza</surname> <given-names>M. L.</given-names></name> <name><surname>Hassan</surname> <given-names>S. T.</given-names></name> <name><surname>Jamil</surname> <given-names>S.</given-names></name> <name><surname>Hyder</surname> <given-names>N.</given-names></name> <name><surname>Batool</surname> <given-names>K.</given-names></name> <name><surname>Walji</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Advancements in deep learning for early diagnosis of Alzheimer&#x00027;s disease using multimodal neuroimaging: challenges and future directions</article-title>. <source>Front. Neuroinform</source>. <volume>19</volume>:<fpage>1557177</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fninf.2025.1557177</pub-id><pub-id pub-id-type="pmid">40385089</pub-id></mixed-citation>
</ref>
<ref id="B62">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Salehi</surname> <given-names>A. W.</given-names></name> <name><surname>Khan</surname> <given-names>S.</given-names></name> <name><surname>Gupta</surname> <given-names>G.</given-names></name> <name><surname>Alabduallah</surname> <given-names>B. I.</given-names></name> <name><surname>Almjally</surname> <given-names>A.</given-names></name> <name><surname>Alsolai</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>A study of CNN and transfer learning in medical imaging: advantages, challenges, future scope</article-title>. <source>Sustainability</source> <volume>15</volume>:<fpage>5930</fpage>. doi: <pub-id pub-id-type="doi">10.3390/su15075930</pub-id></mixed-citation>
</ref>
<ref id="B63">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sarkar</surname> <given-names>M.</given-names></name></person-group> (<year>2025</year>). <article-title>Integrating machine learning and deep learning techniques for advanced Alzheimer&#x00027;s disease detection through gait analysis</article-title>. <source>J. Business Manag. Stud</source>. <volume>7</volume>, <fpage>140</fpage>&#x02013;<lpage>147</lpage>. doi: <pub-id pub-id-type="doi">10.32996/jbms.2025.7.1.8</pub-id></mixed-citation>
</ref>
<ref id="B64">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sava&#x0015F;</surname> <given-names>S.</given-names></name></person-group> (<year>2022</year>). <article-title>Detecting the stages of Alzheimer&#x00027;s disease with pre-trained deep learning architectures</article-title>. <source>Arabian J. Sci. Eng</source>. <volume>47</volume>, <fpage>2201</fpage>&#x02013;<lpage>2218</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s13369-021-06131-3</pub-id></mixed-citation>
</ref>
<ref id="B65">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schultz</surname> <given-names>B. B.</given-names></name></person-group> (<year>1985</year>). <article-title>Levene&#x00027;s test for relative variation</article-title>. <source>Syst. Biol</source>. <volume>34</volume>, <fpage>449</fpage>&#x02013;<lpage>456</lpage>. doi: <pub-id pub-id-type="doi">10.1093/sysbio/34.4.449</pub-id></mixed-citation>
</ref>
<ref id="B66">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shamrat</surname> <given-names>F. J. M.</given-names></name> <name><surname>Akter</surname> <given-names>S.</given-names></name> <name><surname>Azam</surname> <given-names>S.</given-names></name> <name><surname>Karim</surname> <given-names>A.</given-names></name> <name><surname>Ghosh</surname> <given-names>P.</given-names></name> <name><surname>Tasnim</surname> <given-names>Z.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Alzheimernet: an effective deep learning based proposition for Alzheimer&#x00027;s disease stages classification from functional brain changes in magnetic resonance images</article-title>. <source>IEEE Access</source> <volume>11</volume>, <fpage>16376</fpage>&#x02013;<lpage>16395</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2023.3244952</pub-id></mixed-citation>
</ref>
<ref id="B67">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shapiro</surname> <given-names>S. S.</given-names></name> <name><surname>Francia</surname> <given-names>R. S.</given-names></name></person-group> (<year>1972</year>). <article-title>An approximate analysis of variance test for normality</article-title>. <source>J. Am. Stat. Assoc</source>. <volume>67</volume>, <fpage>215</fpage>&#x02013;<lpage>216</lpage>. doi: <pub-id pub-id-type="doi">10.1080/01621459.1972.10481232</pub-id></mixed-citation>
</ref>
<ref id="B68">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>S. G.</given-names></name> <name><surname>Das</surname> <given-names>D.</given-names></name> <name><surname>Barman</surname> <given-names>U.</given-names></name> <name><surname>Saikia</surname> <given-names>M. J.</given-names></name></person-group> (<year>2024</year>). <article-title>Early Alzheimer&#x00027;s disease detection: a review of machine learning techniques for forecasting transition from mild cognitive impairment</article-title>. <source>Diagnostics</source> <volume>14</volume>:<fpage>1759</fpage>. doi: <pub-id pub-id-type="doi">10.3390/diagnostics14161759</pub-id><pub-id pub-id-type="pmid">39202248</pub-id></mixed-citation>
</ref>
<ref id="B69">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tajahmadi</surname> <given-names>S.</given-names></name> <name><surname>Molavi</surname> <given-names>H.</given-names></name> <name><surname>Ahmadijokani</surname> <given-names>F.</given-names></name> <name><surname>Shamloo</surname> <given-names>A.</given-names></name> <name><surname>Shojaei</surname> <given-names>A.</given-names></name> <name><surname>Sharifzadeh</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Metal-organic frameworks: a promising option for the diagnosis and treatment of Alzheimer&#x00027;s disease</article-title>. <source>J. Controlled Release</source> <volume>353</volume>, <fpage>1</fpage>&#x02013;<lpage>29</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jconrel.2022.11.002</pub-id><pub-id pub-id-type="pmid">36343762</pub-id></mixed-citation>
</ref>
<ref id="B70">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Talbi</surname> <given-names>E.-G.</given-names></name></person-group> (<year>2009</year>). <source>Metaheuristics: From Design to Implementation</source>. <publisher-loc>New York</publisher-loc>: <publisher-name>John Wiley and Sons</publisher-name>. doi: <pub-id pub-id-type="doi">10.1002/9780470496916</pub-id></mixed-citation>
</ref>
<ref id="B71">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Villoth</surname> <given-names>J. P.</given-names></name> <name><surname>Zivkovic</surname> <given-names>M.</given-names></name> <name><surname>Zivkovic</surname> <given-names>T.</given-names></name> <name><surname>Abdel-salam</surname> <given-names>M.</given-names></name> <name><surname>Hammad</surname> <given-names>M.</given-names></name> <name><surname>Jovanovic</surname> <given-names>L.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Two-tier deep and machine learning approach optimized by adaptive multi-population firefly algorithm for software defects prediction</article-title>. <source>Neurocomputing</source> <volume>630</volume>:<fpage>129695</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neucom.2025.129695</pub-id></mixed-citation>
</ref>
<ref id="B72">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Villoth</surname> <given-names>S. J.</given-names></name> <name><surname>Villoth</surname> <given-names>J. P.</given-names></name> <name><surname>Jovanovic</surname> <given-names>L.</given-names></name> <name><surname>Mani</surname> <given-names>J.</given-names></name> <name><surname>Zivkovic</surname> <given-names>M.</given-names></name> <name><surname>Zivkovic</surname> <given-names>T.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>&#x0201C;Optimizing error detection in generated code using metaheuristic optimized natural language processing,&#x0201D;</article-title> in <source>International Conference on Soft Computing and its Engineering Applications</source> (<publisher-loc>Springer</publisher-loc>), <fpage>239</fpage>&#x02013;<lpage>253</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-031-88039-1_19</pub-id></mixed-citation>
</ref>
<ref id="B73">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>D.-n.</given-names></name> <name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Zhao</surname> <given-names>D.</given-names></name></person-group> (<year>2022</year>). <article-title>Corporate finance risk prediction based on lightgbm</article-title>. <source>Inf. Sci</source>. <volume>602</volume>, <fpage>259</fpage>&#x02013;<lpage>268</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ins.2022.04.058</pub-id></mixed-citation>
</ref>
<ref id="B74">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wolpert</surname> <given-names>D.</given-names></name> <name><surname>Macready</surname> <given-names>W.</given-names></name></person-group> (<year>1997</year>). <article-title>No free lunch theorems for optimization</article-title>. <source>IEEE Trans. Evol. Comput</source>. <volume>1</volume>, <fpage>67</fpage>&#x02013;<lpage>82</lpage>. doi: <pub-id pub-id-type="doi">10.1109/4235.585893</pub-id></mixed-citation>
</ref>
<ref id="B75">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Woolson</surname> <given-names>R. F.</given-names></name></person-group> (<year>2005</year>). <article-title>&#x0201C;Wilcoxon signed-rank test,&#x0201D;</article-title> in <source>Wiley Encyclopedia of Clinical Trials</source>. doi: <pub-id pub-id-type="doi">10.1002/0470011815.b2a15177</pub-id></mixed-citation>
</ref>
<ref id="B76">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>X.-S.</given-names></name> <name><surname>He</surname> <given-names>X.</given-names></name></person-group> (<year>2013a</year>). <article-title>Bat algorithm: literature review and applications</article-title>. <source>Int. J. Bio-Insp. Comput</source>. <volume>5</volume>, <fpage>141</fpage>&#x02013;<lpage>149</lpage>. doi: <pub-id pub-id-type="doi">10.1504/IJBIC.2013.055093</pub-id></mixed-citation>
</ref>
<ref id="B77">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>X.-S.</given-names></name> <name><surname>He</surname> <given-names>X.</given-names></name></person-group> (<year>2013b</year>). <article-title>Firefly algorithm: recent advances and applications</article-title>. <source>Int. J. Swarm Intell</source>. <volume>1</volume>, <fpage>36</fpage>&#x02013;<lpage>50</lpage>. doi: <pub-id pub-id-type="doi">10.1504/IJSI.2013.055801</pub-id></mixed-citation>
</ref>
<ref id="B78">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yu</surname> <given-names>T.</given-names></name> <name><surname>Liu</surname> <given-names>X.</given-names></name> <name><surname>Wu</surname> <given-names>J.</given-names></name> <name><surname>Wang</surname> <given-names>Q.</given-names></name></person-group> (<year>2021</year>). <article-title>Electrophysiological biomarkers of epileptogenicity in Alzheimer&#x00027;s disease</article-title>. <source>Front. Hum. Neurosci</source>. <volume>15</volume>:<fpage>747077</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnhum.2021.747077</pub-id><pub-id pub-id-type="pmid">34916917</pub-id></mixed-citation>
</ref>
<ref id="B79">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>N.</given-names></name> <name><surname>Chai</surname> <given-names>S.</given-names></name> <name><surname>Wang</surname> <given-names>J.</given-names></name></person-group> (<year>2025</year>). <article-title>Assessing and projecting the global impacts of Alzheimer&#x00027;s disease</article-title>. <source>Front. Public Health</source> <volume>12</volume>:<fpage>1453489</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpubh.2024.1453489</pub-id><pub-id pub-id-type="pmid">39882109</pub-id></mixed-citation>
</ref>
<ref id="B80">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>Z.</given-names></name> <name><surname>Chuah</surname> <given-names>J. H.</given-names></name> <name><surname>Lai</surname> <given-names>K. W.</given-names></name> <name><surname>Chow</surname> <given-names>C.-O.</given-names></name> <name><surname>Gochoo</surname> <given-names>M.</given-names></name> <name><surname>Dhanalakshmi</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Conventional machine learning and deep learning in Alzheimer&#x00027;s disease diagnosis using neuroimaging: a review</article-title>. <source>Front. Comput. Neurosci</source>. <volume>17</volume>:<fpage>1038636</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fncom.2023.1038636</pub-id><pub-id pub-id-type="pmid">36814932</pub-id></mixed-citation>
</ref>
<ref id="B81">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Zivkovic</surname> <given-names>M.</given-names></name> <name><surname>Antonijevic</surname> <given-names>M.</given-names></name> <name><surname>Jovanovic</surname> <given-names>L.</given-names></name> <name><surname>Krasic</surname> <given-names>M.</given-names></name> <name><surname>Bacanin</surname> <given-names>N.</given-names></name> <name><surname>Zivkovic</surname> <given-names>T.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>&#x0201C;Ocular disease diagnosis using CNNs optimized by modified variable neighborhood search algorithm,&#x0201D;</article-title> in <source>International Joint Conference on Advances in Computational Intelligence</source> (<publisher-loc>Springer</publisher-loc>), <fpage>99</fpage>&#x02013;<lpage>112</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-981-96-3762-1_8</pub-id></mixed-citation>
</ref>
<ref id="B82">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Zivkovic</surname> <given-names>M.</given-names></name> <name><surname>Bacanin</surname> <given-names>N.</given-names></name> <name><surname>Zivkovic</surname> <given-names>T.</given-names></name> <name><surname>Jovanovic</surname> <given-names>L.</given-names></name> <name><surname>Kaljevic</surname> <given-names>J.</given-names></name> <name><surname>Antonijevic</surname> <given-names>M.</given-names></name></person-group> (<year>2023</year>). <article-title>&#x0201C;Parkinson&#x00027;s detection from gait time series classification using LSTM tuned by modified RSA algorithm,&#x0201D;</article-title> in <source>International Conference on Communication and Computational Technologies</source> (<publisher-loc>Springer</publisher-loc>), <fpage>119</fpage>&#x02013;<lpage>134</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-981-97-7423-4_10</pub-id></mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2365931/overview">Yuhua Li</ext-link>, Cardiff University, United Kingdom</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3100432/overview">Akanksha Kaushik</ext-link>, The NorthCap University, India</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3186718/overview">Chandrakanta Mahanty</ext-link>, Gandhi Institute of Technology and Management, India</p>
</fn>
</fn-group>
<fn-group>
<fn id="fn0003"><label>1</label><p><ext-link ext-link-type="uri" xlink:href="https://www.who.int/news-room/fact-sheets/detail/dementia">https://www.who.int/news-room/fact-sheets/detail/dementia</ext-link></p></fn>
<fn id="fn0004"><label>2</label><p><ext-link ext-link-type="uri" xlink:href="https://www.kaggle.com/datasets/aryansinghal10/alzheimers-multiclass-dataset-equal-and-augmented">https://www.kaggle.com/datasets/aryansinghal10/alzheimers-multiclass-dataset-equal-and-augmented</ext-link></p></fn>
</fn-group>
</back>
</article> 
