<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="EN" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neurosci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neurosci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1662-453X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnins.2026.1778376</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Efficient attention-based Ghost-ResNet for brain tumor classification in magnetic resonance imaging (MRI)</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Shatnawi</surname> <given-names>Nahlah</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/3327057/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Nahar</surname> <given-names>Khalid M. O.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2038858/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Al Mamlook</surname> <given-names>Rabia Emhamed</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2940207/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Almuflih</surname> <given-names>Ali Saeed</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Al Fatais</surname> <given-names>Abdullah Mohammed</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Alhatamleh</surname> <given-names>Salem</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/3093003/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Alishwait</surname> <given-names>Amal</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Amin</surname> <given-names>Mohammad</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2898364/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Department of Computer Science, Faculty of Information Technology and Computer Sciences, Yarmouk University</institution>, <city>Irbid</city>, <country country="JO">Jordan</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Business Administration, Trine University</institution>, <city>Angola, IN</city>, <country country="US">United States</country></aff>
<aff id="aff3"><label>3</label><institution>Department of Artificial Intelligence, Center of Advanced Research for Complementary Medicine, University of Zawia</institution>, <city>Zawia</city>, <country country="LY">Libya</country></aff>
<aff id="aff4"><label>4</label><institution>Department of Industrial Engineering, College of Engineering, King Khalid University</institution>, <city>Abha</city>, <country country="SA">Saudi Arabia</country></aff>
<aff id="aff5"><label>5</label><institution>Center for Engineering and Technology Innovations, King Khalid University</institution>, <city>Abha</city>, <country country="SA">Saudi Arabia</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Abdullah Mohammed Al Fatais, <email xlink:href="mailto:alftas@kku.edu.sa">alftas@kku.edu.sa</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-23">
<day>23</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>20</volume>
<elocation-id>1778376</elocation-id>
<history>
<date date-type="received">
<day>30</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>28</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>03</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Shatnawi, Nahar, Al Mamlook, Almuflih, Al Fatais, Alhatamleh, Alishwait and Amin.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Shatnawi, Nahar, Al Mamlook, Almuflih, Al Fatais, Alhatamleh, Alishwait and Amin</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-23">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Brain tumor classification from magnetic resonance imaging remains a challenging task in medical image analysis, particularly when high diagnostic performance must be achieved under limited computational resources. Effective models are therefore required to balance classification accuracy with efficiency to support practical clinical deployment.</p>
</sec>
<sec>
<title>Methods</title>
<p>This study addresses this challenge by proposing an efficiency-oriented deep learning architecture that integrates Ghost modules into a ResNet-50 backbone and enhances feature learning through Efficient Channel Attention (ECA) blocks. The proposed design aims to improve discriminative capability while reducing feature redundancy and computational overhead.</p>
<p>The model was evaluated on the Bangladesh Brain Cancer MRI Dataset, which contains 6,056 MRI images representing three tumor categories: glioma, meningioma, and pituitary tumors. Preprocessing included contrast normalization using Contrast Limited Adaptive Histogram Equalization (CLAHE). Data augmentation was selectively applied to improve generalization while avoiding excessive artificial amplification of feature representations.</p>
</sec>
<sec>
<title>Results</title>
<p>Experimental results demonstrate the effectiveness of the proposed attention-assisted lightweight architecture. The model achieved an overall classification accuracy of 97.85%, while macro-averaged precision, recall (sensitivity), and specificity all exceeded 97.8% (as defined in the Methods section). This corresponds to a 1.65% absolute improvement in accuracy compared with the strongest baseline model, DenseNet121, while maintaining a low false-positive rate. These findings suggest that competitive performance can be achieved without increasing architectural complexity.</p>
</sec>
<sec>
<title>Discussion</title>
<p>The results highlight the potential of pursuing efficiency-driven architectural designs as an alternative to increasingly complex deep learning models. In particular, channel-attention-assisted feature generation appears to preserve high diagnostic accuracy while reducing representational and computational overhead, supporting its suitability for resource-constrained medical imaging applications.</p>
</sec>
</abstract>
<kwd-group>
<kwd>brain tumor classification</kwd>
<kwd>deep learning</kwd>
<kwd>efficient channel attention (ECA)</kwd>
<kwd>Ghost network</kwd>
<kwd>medical image</kwd>
<kwd>MRI</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. The authors extended their appreciation to the Deanship of Research and Graduate Studies at King Khalid University for funding this work through Large Research Project (grant no. RGP2/655/46).</funding-statement>
</funding-group>
<counts>
<fig-count count="7"/>
<table-count count="5"/>
<equation-count count="28"/>
<ref-count count="35"/>
<page-count count="16"/>
<word-count count="10873"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Brain Imaging Methods</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="S1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>Brain tumors remain a persistent challenge in neuro-oncology because of their heterogeneous morphology, variable clinical behavior and substantial effect on patient survival and neurological function. Abnormal cellular proliferation within the brain can lead to progressive cognitive decline, motor deficits and, in advanced stages, mortality (<xref ref-type="bibr" rid="B6">Bouhafra and El Bahi, 2025</xref>). Epidemiologically, gliomas constitute roughly 22.9% of diagnosed cases, whereas meningiomas account for the largest share at 41.7%; pituitary tumors, although biologically distinct, present diverse prognostic and therapeutic profiles (<xref ref-type="bibr" rid="B22">Price et al., 2025</xref>).</p>
<p>Magnetic Resonance Imaging (MRI) is the modality of choice for brain tumor evaluation, owing to its superior soft tissue contrast and spatial resolution. Existing clinical protocols integrate multiple contrast sequences &#x2014; T1-weighted, T2-weighted, FLAIR, and contrast-enhanced acquisitions &#x2014; to support tumor characterization and treatment planning (<xref ref-type="bibr" rid="B14">Kanal et al., 2025</xref>). Despite these advances, MRI interpretation remains largely manual, time-intensive and dependent on specialist expertise, which limits diagnostic scalability, particularly in resource-constrained healthcare settings (<xref ref-type="bibr" rid="B2">Aiya et al., 2025</xref>).</p>
<p>A number of challenging issues arise in automated brain tumor classification from MRI images. The high intra- and inter-class variations among brain tumor categories with respect to size, type, consistency, and position increase the difficulty of this task (<xref ref-type="bibr" rid="B3">Albalawi et al., 2024</xref>). These issues are aggravated by high variability in the data acquisition process, as well as by scanner-dependent factors, which are known to adversely affect generalization (<xref ref-type="bibr" rid="B14">Kanal et al., 2025</xref>). Apart from this, the occurrence of class imbalance and restricted expert annotation for the distinct types of brain tumors is a constraint in real-world clinical settings, limiting the reliability of the models (<xref ref-type="bibr" rid="B9">Disci et al., 2025</xref>). From the clinical point of view, efficiency is equally important, as resource-intensive systems are not easily deployable within clinical settings (<xref ref-type="bibr" rid="B4">Babu Vimala et al., 2023</xref>).</p>
<p>Deep learning has reshaped medical image analysis, with Convolutional Neural Networks (CNNs) emerging as the dominant architecture for MRI-based tumor classification (<xref ref-type="bibr" rid="B3">Albalawi et al., 2024</xref>). Transfer learning has been shown to contribute substantially to this progress, enabling pretrained architectures such as VGG, ResNet, and DenseNet to be adapted to medical imaging tasks despite limited labeled data (<xref ref-type="bibr" rid="B4">Babu Vimala et al., 2023</xref>; <xref ref-type="bibr" rid="B9">Disci et al., 2025</xref>). More recent developments incorporate attention mechanisms and hybrid CNN&#x2013;Transformer designs to improve feature discrimination and contextual modeling. In parallel, lightweight architectures and modules focusing on efficiency have emerged as promising approaches to minimize computational overhead without compromising diagnostic accuracy, a consideration of increasing significance in practical clinical use scenarios (<xref ref-type="bibr" rid="B11">Han et al., 2020</xref>; <xref ref-type="bibr" rid="B10">Ferdous et al., 2025</xref>).</p>
<p>Despite the remarkable progress achieved by deep learning techniques in brain tumor classification using MRI, most state-of-the-art models still rely on deep and computationally expensive architectures. Such approaches often prioritize accuracy improvements at the cost of increased parameter size, memory consumption, and inference time, which limits their applicability in real-world clinical environments, particularly in resource-constrained settings. Moreover, several existing studies employ aggressive data augmentation and feature expansion strategies that may artificially inflate performance while increasing computational overhead. These gaps underscore the need for lightweight yet highly discriminative models that can strike a balance between classification accuracy and computational efficiency. Motivated by these limitations, this study aims to design an efficiency-oriented architecture that integrates Ghost modules with a ResNet backbone and enhances feature discrimination through Efficient Channel Attention (ECA), thereby achieving high diagnostic accuracy without sacrificing computational feasibility.</p>
<p>Based on the identified research gaps, this study seeks to address the following research questions: (1) Can the integration of Ghost modules into a ResNet-based architecture significantly reduce computational complexity while preserving or improving classification performance for brain tumor MRI images? (2) To what extent does the incorporation of Efficient Channel Attention (ECA) improve feature representation and tumor discrimination in a lightweight network? (3) How does the proposed attention-assisted Ghost-ResNet model compare with existing lightweight and standard deep learning architectures in terms of accuracy, precision, recall, specificity, and false positive rate? Answering these questions will help determine whether an efficiency-driven architectural design can challenge the prevailing assumption that higher accuracy necessarily requires deeper and more computationally intensive models.</p>
<p>This paper deals with the existing difference between performance-based consideration and usability in the clinical use scenario of brain tumor image classification using automated systems, with a focus on maximizing diagnosis accuracy along with the aspects of computational efficiency, robustness to variability in imaging acquisition, and practical clinical usability. The contributions of this paper are given below:</p>
<list list-type="order">
<list-item>
<p>The study introduces an efficient attention-based deep learning framework that integrates a Ghost-enhanced ResNet backbone with channel-wise attention to improve discriminative feature learning while reducing computational redundancy.</p>
</list-item>
<list-item>
<p>A targeted MRI preprocessing and augmentation strategy is employed to enhance image quality, stabilize training, and improve generalization across heterogeneous clinical data.</p>
</list-item>
<list-item>
<p>Extensive experimental evaluation demonstrates that the proposed method consistently outperforms established transfer-learning baselines, achieving superior accuracy, sensitivity and specificity while maintaining suitability for real-world clinical implementation.</p>
</list-item>
</list>
<p>The remainder of this paper is organized as follows. Section 2 reviews related work in deep learning-based brain tumor classification. Section 3 details the dataset, preprocessing strategy, and proposed architecture. Section 4 presents experimental results and comparative analyses. Section 5 discusses the results in a broader clinical and technical context, and Section 6 concludes with final remarks and directions for future research.</p>
</sec>
<sec id="S2">
<label>2</label>
<title>Related work</title>
<p>Deep learning has been identified as the most preferred methodology found to date in classifying brain tumors from magnetic resonance images, because of its capability to learn hierarchical representations without resorting to engineered features (<xref ref-type="bibr" rid="B18">Litjens et al., 2017</xref>; <xref ref-type="bibr" rid="B26">Shen et al., 2017</xref>). Traditional computer-assisted diagnosis solutions proposed and designed around texture features, wavelet transforms, or statistical feature extraction lacked robustness when exposed to variations existing within the image modality, as well as those within the tumors (<xref ref-type="bibr" rid="B33">Zacharaki et al., 2009</xref>). Convolutional neural networks (CNNs) have since displaced these approaches by enabling end-to-end optimization directly on imaging data. Despite rapid methodological progress, a central limitation persists across both artificial intelligence and clinical domains: the prevailing dependence on increasingly deep and computationally expensive architectures to achieve marginal performance gains. <xref ref-type="table" rid="T1">Table 1</xref> displays a comparison with previous studies on brain tumor Classification.</p>
<table-wrap position="float" id="T1">
<label>TABLE 1</label>
<caption><p>Comparison of previous studies of brain tumor classification.</p></caption>
<table cellspacing="5" cellpadding="5" frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="center">References</th>
<th valign="top" align="center">Dataset</th>
<th valign="top" align="center">Methodology</th>
<th valign="top" align="center">Reported accuracy</th>
<th valign="top" align="center">Key limitations</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="center"><xref ref-type="bibr" rid="B25">Shahin (2025)</xref></td>
<td valign="top" align="center">Figshare (7,023 images, 4 classes)</td>
<td valign="top" align="center">Fine-tuned ResNet-34 with Ranger optimizer</td>
<td valign="top" align="center">99.66%</td>
<td valign="top" align="center">Single-model evaluation; limited dataset diversity</td>
</tr>
<tr>
<td valign="top" align="center"><xref ref-type="bibr" rid="B2">Aiya et al. (2025)</xref></td>
<td valign="top" align="center">Figshare (7,023 images, 4 classes)</td>
<td valign="top" align="center">VGG16 with Attention mechanisms and Grad-CAM</td>
<td valign="top" align="center">99.00%</td>
<td valign="top" align="center">Complex preprocessing; explainability overhead</td>
</tr>
<tr>
<td valign="top" align="center"><xref ref-type="bibr" rid="B4">Babu Vimala et al. (2023)</xref></td>
<td valign="top" align="center">Multiple datasets (3 classes)</td>
<td valign="top" align="center">EfficientNet with transfer learning and fine-tuning</td>
<td valign="top" align="center">99.06%</td>
<td valign="top" align="center">Dataset-dependent generalization</td>
</tr>
<tr>
<td valign="top" align="center"><xref ref-type="bibr" rid="B30">Wong et al. (2025)</xref></td>
<td valign="top" align="center">17,136 augmented MRI images (4 classes)</td>
<td valign="top" align="center">Pretrained VGG16 with extensive data augmentation</td>
<td valign="top" align="center">99.24%</td>
<td valign="top" align="center">High computational and augmentation cost</td>
</tr>
<tr>
<td valign="top" align="center"><xref ref-type="bibr" rid="B9">Disci et al. (2025)</xref></td>
<td valign="top" align="center">Figshare (7,023 images, 4 classes)</td>
<td valign="top" align="center">Comparative study of multiple CNN architectures</td>
<td valign="top" align="center">98.73% (Xception)</td>
<td valign="top" align="center">Sensitivity to class imbalance</td>
</tr>
<tr>
<td valign="top" align="center"><xref ref-type="bibr" rid="B34">Zahoor et al. (2024)</xref></td>
<td valign="top" align="center">Kaggle, Br35H, Figshare</td>
<td valign="top" align="center">Res-BRNet (Residual + Regional CNN)</td>
<td valign="top" align="center">98.22%</td>
<td valign="top" align="center">Requires broader external validation</td>
</tr>
<tr>
<td valign="top" align="center"><xref ref-type="bibr" rid="B24">Rastogi et al. (2025)</xref></td>
<td valign="top" align="center">Kaggle brain tumor dataset</td>
<td valign="top" align="center">Fine-tuned transfer learning (InceptionResNetV2, VGG19, Xception, MobileNetV2)</td>
<td valign="top" align="center">96.11%</td>
<td valign="top" align="center">Performance variability across models</td>
</tr>
<tr>
<td valign="top" align="center"><xref ref-type="bibr" rid="B1">Ahsan et al. (2025)</xref></td>
<td valign="top" align="center">Brain Tumor Figshare dataset</td>
<td valign="top" align="center">YOLOv5 detection combined with 2D U-Net segmentation</td>
<td valign="top" align="center">89.5% mAP/88.1% DSC</td>
<td valign="top" align="center">Detection&#x2013;segmentation dependency</td>
</tr>
<tr>
<td valign="top" align="center"><xref ref-type="bibr" rid="B27">Vamsidhar et al. (2025)</xref></td>
<td valign="top" align="center">Multiple MRI datasets</td>
<td valign="top" align="center">Hybrid ResNet101 + Xception with LIME/ViT + RF</td>
<td valign="top" align="center">99.67%</td>
<td valign="top" align="center">Ensemble complexity; high inference cost</td>
</tr>
<tr>
<td valign="top" align="center"><xref ref-type="bibr" rid="B13">Iftikhar et al. (2025)</xref></td>
<td valign="top" align="center">Figshare (7,023 images, 4 classes)</td>
<td valign="top" align="center">XAI + CNN</td>
<td valign="top" align="center">99% accuracy on seen data and 95% on unseen data</td>
<td valign="top" align="center">Added complexity for deployment</td>
</tr>
<tr>
<td valign="top" align="center"><xref ref-type="bibr" rid="B15">Khandaker et al. (2024)</xref></td>
<td valign="top" align="center">Multi-class Brain MRI datasets</td>
<td valign="top" align="center">Nine-model ensemble with XAI techniques</td>
<td valign="top" align="center">99.83% (DenseNet169)</td>
<td valign="top" align="center">Very high computational overhead</td>
</tr>
</tbody>
</table></table-wrap>
<p>A substantial body of work reports high classification accuracy through architectural modification of established CNN backbones. Residual learning&#x2013;based models, such as modified ResNet-34 architectures with task-specific classification heads, have achieved accuracy levels as high as 99.66% on publicly available MRI datasets, demonstrating the effectiveness of skip connections for stabilizing deep feature learning (<xref ref-type="bibr" rid="B25">Shahin, 2025</xref>). Attention-enhanced VGG-based hybrid frameworks further improve discrimination by reweighting salient feature channels and spatial regions, with reported accuracies approaching 99% on Kaggle and BRaTS datasets (<xref ref-type="bibr" rid="B2">Aiya et al., 2025</xref>). However, these architectures often rely on extensive data augmentation and large parameter counts, limiting computational efficiency and increasing sensitivity to preprocessing choices.</p>
<p>Efficiency-driven transfer learning has also been explored through compound-scaled architectures. EfficientNet-based models applying coordinated scaling of network depth, width, and input resolution deliver strong performance for multi-class brain tumor classification, with reported test accuracy exceeding 99% (<xref ref-type="bibr" rid="B4">Babu Vimala et al., 2023</xref>). While such designs improve parameter utilization, they still incur nontrivial inference costs and memory footprints, posing challenges for deployment in real-time or resource-constrained clinical environments.</p>
<p>Comparative benchmarking analyses indicate that reported performance depends strongly on the composition of the data, the preprocessing steps, and the metrics used in model assessment. Large-scale comparative analyses among pretrained models such as Xception, MobileNetV2, InceptionV3, ResNet50, and DenseNet identify different top-performing models depending upon the experimental settings (<xref ref-type="bibr" rid="B9">Disci et al., 2025</xref>; <xref ref-type="bibr" rid="B30">Wong et al., 2025</xref>). Such results point out the lack of architectural robustness and raise issues related to the generalizability of reported results.</p>
<p>Contemporary trends in research involve the use of hybrid and region-aware approaches. The combination of residual learning and region-based convolutional layer activities improves spatial and boundary awareness, hence increasing the capability for discrimination in diverse tumor morphologies (<xref ref-type="bibr" rid="B34">Zahoor et al., 2024</xref>). Other approaches involve the combination of the detection and segmentation pipeline, including the use of YOLOv5 with the two-dimensional U-Net architectures, resulting in enhanced Dice coefficient and localization accuracy compared with the classical Mask R-CNN model. This function increases pipeline complexity and reduces model reproducibility (<xref ref-type="bibr" rid="B1">Ahsan et al., 2025</xref>; <xref ref-type="bibr" rid="B24">Rastogi et al., 2025</xref>).</p>
<p>Recent studies have demonstrated the effectiveness of convolutional neural networks (CNNs) for automated medical image classification (<xref ref-type="bibr" rid="B20">Meena et al., 2023</xref>). In the context of brain tumor detection, an improved CNN-based model was proposed to address binary classification of MRI scans by distinguishing tumor and non-tumor cases. The study leveraged data augmentation strategies to improve classification accuracy and reduce training time, reporting strong performance across multiple evaluation metrics such as accuracy, precision, recall, and F1-score. While this work highlights the benefit of augmentation and efficient CNN design, it focuses on binary classification and does not explicitly address computational redundancy or efficiency constraints in multi-class tumor classification scenarios.</p>
<p>Beyond brain tumor imaging, deep learning techniques have also been explored for abnormality detection in other radiological domains. For example, a study on musculoskeletal radiographs employed DenseNet and VGG architectures to identify abnormalities using the large-scale MURA dataset (<xref ref-type="bibr" rid="B8">Choudhary and Meena, 2021</xref>). The authors compared their models against those from the Stanford ML Group MURA Competition using the Cohen Kappa statistic and demonstrated that deep CNNs can achieve performance comparable to or exceeding state-of-the-art approaches in several study types. However, such architectures remain computationally intensive and are not optimized for lightweight or efficiency-oriented deployment.</p>
<p>In contrast to these existing works, the present study focuses on balancing diagnostic accuracy with computational efficiency by integrating Ghost modules and Efficient Channel Attention within a residual learning framework. This design explicitly targets the reduction of redundant feature generation while preserving discriminative power, enabling effective multi-class brain tumor classification under constrained computational budgets.</p>
<p>Interpretability has increasingly attracted attention, thanks to the need in medical applications for interpretability and trust. Parallel CNN ensembles and post-hoc explanation methods, such as LIME and explainable AI methods (XAI), have been used to identify decision-relevant areas (<xref ref-type="bibr" rid="B27">Vamsidhar et al., 2025</xref>). It should, however, be noted that interpretability in these experiments has generally been considered a posteriori or built into specific CNN architectures. Interpretable CNN architectures have shown moderate improvements through extensive regularization and optimization, reaching a maximum of 97.1% accuracy with models based on DenseNet (<xref ref-type="bibr" rid="B13">Iftikhar et al., 2025</xref>). Large-scale evaluations across nine pretrained CNNs continue to favor deep, parameter-intensive networks when accuracy alone is optimized (<xref ref-type="bibr" rid="B15">Khandaker et al., 2024</xref>).</p>
<p>The literature has already shown that near-saturated accuracy can be realized; however, this accuracy usually comes at the cost of architectures that are computationally expensive, difficult to interpret, and ill-aligned with real-world deployment needs. The open research question fundamentally involves creating architectures that are naturally attentive, inherently bounded with regard to representational redundancy, and useful for medical diagnosis through meaningful architectural efficiencies with direct implications for real-world medical innovation, rather than purely through accuracy gains. The need exists to move away from accuracy-driven scaling and toward relevant architectural efficiencies.</p>
</sec>
<sec id="S3" sec-type="materials|methods">
<label>3</label>
<title>Materials and methods</title>
<p>The proposed model has a series of strongly integrated steps in order to efficiently classify the brain tumor from MRI scans. Step 1 involves the normalization of the input image by resizing into 224 &#x00D7; 224 resolution images, intensity normalization, contrast enhancement using CLAHE, along with specific data augmentation strategies. This is aimed at reducing the variability of contrast induced during the acquisition procedure while retaining the anatomical structures. Step 2 involves the use of the improved ResNet50 architecture to learn the spatial and semantic features from the input image. It uses residual connections to promote stable optimization. Step 3 involves the incorporation of Ghost blocks into the model. They work on the principle of using lightweight depth-wise operations to discard the redundancy of features. Step 4 involves the use of ECA (Efficient Channel Attention), a strategy that enhances the model features using the entire context. It boosts the desired features. Step 5 involves the use of a classifier that comprises global average pooling followed by a regularized classification head (e.g., dropout/weight decay). Its performance is measured using accuracy, precision, sensitivity, specificity, and F1-score. As illustrated in <xref ref-type="fig" rid="F1">Figure 1</xref>, the model pipeline begins with MRI inputs that are processed through a deep convolutional backbone to extract high-level spatial and semantic feature representations.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption><p>Overview of the proposed improved ResNet50 architecture for brain tumor classification from MRI images.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1778376-g001.tif">
<alt-text content-type="machine-generated">Flowchart diagram presenting a deep learning pipeline for brain MRI classification. Stages include input image preprocessing, improved ResNet50 model layers, classification into brain tumor, glioma, and meningioma, and display of evaluation metrics such as accuracy, precision, sensitivity, specificity, F1 score, and area under the curve.</alt-text>
</graphic>
</fig>
<sec id="S3.SS1">
<label>3.1</label>
<title>Dataset and preprocessing</title>
<sec id="S3.SS1.SSS1">
<label>3.1.1</label>
<title>Dataset collection</title>
<p>The experimental evaluation was conducted using the Bangladesh Brain Cancer MRI Dataset which contains 6,056 magnetic resonance images distributed across three tumor categories: glioma (2,004 images), meningioma (2,004 images) and pituitary related brain tumors (2,048 images) (<xref ref-type="bibr" rid="B23">Rahman, 2024</xref>). The dataset was collected through collaborations with multiple hospitals across Bangladesh and reviewed in consultation with medical specialists to guarantee diagnostic validity and clinical relevance. All images were uniformly resized to a spatial resolution of 224 &#x00D7; 224 pixels, providing a consistent input format while preserving anatomically meaningful detail. Owing to its multi-institutional origin and balanced class composition, the dataset captures a representative range of tumor appearances and imaging conditions encountered in routine clinical practice. These attributes make it well suited for the development and evaluation of computerized diagnostic models in brain tumor imaging. An 80/10/10 split ratio was applied at the patient level, ensuring that no subject overlap exists across subsets. This strategy prevents correlated samples from the same patient appearing in multiple splits and avoids artificially inflated performance estimates (<xref ref-type="bibr" rid="B32">Yagis et al., 2021</xref>). Typical MRI samples from each tumor category are shown in <xref ref-type="fig" rid="F2">Figure 2</xref>.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption><p>Dataset representation.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1778376-g002.tif">
<alt-text content-type="machine-generated">Three MRI scans of human brains displayed side by side: the left image labeled &#x201C;Brain Glioma&#x201D; shows an abnormal mass in the brain, the center image labeled &#x201C;Brain Menin&#x201D; displays a rounded lesion, and the right image labeled &#x201C;Brain Tumor&#x201D; features another abnormal growth within the brain.</alt-text>
</graphic>
</fig>
</sec>
<sec id="S3.SS1.SSS2">
<label>3.1.2</label>
<title>Image preprocessing</title>
<p>Image preprocessing is a fundamental step in stabilizing a deep learning pipeline, and this is especially true when using MRI images in the medical or healthcare sector, wherein MRI images are generally prone to noise, contrast variation, and artifacts arising from the acquisition process. To address these sources of variability, the brain MRI images were subjected to a series of carefully chosen techniques targeting spatial resolution, local contrast enhancement, noise reduction, and clear demarcation around the edges of the tumor. All the images were resized to a unified spatial resolution of <italic>I</italic> &#x2208; &#x211D;<sup>224&#x00D7;224&#x00D7;3</sup>, ensuring complete congruence with the ResNet-50 backbone network and consistent tensor dimensions during forward and backward passes. The pixel intensity values, which lie in the range [0, 255], were converted to the range [0, 1]. This normalization stabilizes the gradient magnitudes and, therefore, increases the speed of convergence and prevents possible instability of the optimization algorithm during gradient computations (<xref ref-type="bibr" rid="B16">Li et al., 2021</xref>). Because MRI intrinsically tends to produce low-contrast images, Contrast-Limited Adaptive Histogram Equalization (CLAHE) was applied (<xref ref-type="bibr" rid="B35">Zuiderveld, 1994</xref>). Images were first converted from RGB color space to the LAB representation; subsequently, CLAHE was performed only on the luminance channel, which was then combined with chromatic information and transformed back to RGB space (<xref ref-type="bibr" rid="B21">Mishra, 2021</xref>). This procedure significantly enhances the delineation of tumor margins and internal texture while maintaining consistency in global intensity.</p>
<disp-formula id="S3.E1">
<mml:math id="M1">
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:msub>
<mml:mi>I</mml:mi>
<mml:mrow>
<mml:mi>L</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>A</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>B</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>G</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>B</mml:mi>
</mml:mrow>
<mml:mo>&#x2192;</mml:mo>
<mml:mrow>
<mml:mi>L</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>A</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>B</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mi>I</mml:mi>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(1)</label></disp-formula>
<disp-formula id="S3.E2">
<mml:math id="M2">
<mml:mrow>
<mml:mrow>
<mml:mi>C</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>D</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>F</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mi>k</mml:mi>
<mml:mo rspace="5.8pt" stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:munderover>
<mml:mstyle displaystyle="true"><mml:mo largeop="true" movablelimits="false" symmetric="true">&#x2211;</mml:mo></mml:mstyle>
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:mi>i</mml:mi>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mn>0</mml:mn>
</mml:mrow>
<mml:mi>k</mml:mi>
</mml:munderover>
<mml:mfrac>
<mml:mrow>
<mml:mi>h</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mi>N</mml:mi>
</mml:mfrac>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(2)</label></disp-formula>
<p>In this case, the local intensity histogram is expressed by <italic>h</italic>(<italic>i</italic>), with <italic>N</italic> being the total number of pixels for each contextual image portion. The resulting cumulative distribution is utilized for calculating the adjusted luminance values; afterwards, the enhanced luminance image is combined with the chrominance channels to revert to the RGB color space. The approach sharpens the definition of the tumor boundaries and reveals the tumor heterogeneity, while the adaptive clipping limits the contrast enhancement and prevents over-amplification of noise. Before feature extraction, Gaussian filtering was employed to attenuate high-frequency noise introduced during image acquisition and to suppress unrelated background fluctuations. Smoothing of images was done by convolution with a Gaussian kernel. The standard deviation parameter &#x03C3; controls the amount of smoothing. This operation effectively reduces random noise while retaining critical anatomical structures that are important for reliable downstream feature learning.</p>
<disp-formula id="S3.E3">
<mml:math id="M3">
<mml:mrow>
<mml:mrow>
<mml:msup>
<mml:mi>L</mml:mi>
<mml:msup>
<mml:mi/>
<mml:mo>&#x2032;</mml:mo>
</mml:msup>
</mml:msup>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mi>k</mml:mi>
<mml:mo rspace="5.8pt" stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>L</mml:mi>
<mml:mrow>
<mml:mtext>max</mml:mtext>
</mml:mrow>
</mml:msub>
<mml:mo>-</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>&#x22C5;</mml:mo>
<mml:mi>C</mml:mi>
</mml:mrow>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>D</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>F</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mi>k</mml:mi>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(3)</label></disp-formula>
<disp-formula id="S3.E4">
<mml:math id="M4">
<mml:mrow>
<mml:mrow>
<mml:mi>G</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mi>x</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>y</mml:mi>
<mml:mo rspace="5.8pt" stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>&#x03C0;</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:msup>
<mml:mi>&#x03C3;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x2062;</mml:mo>
<mml:mtext>exp</mml:mtext>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mo>-</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msup>
<mml:mi>x</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>+</mml:mo>
<mml:msup>
<mml:mi>y</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:mo>&#x2062;</mml:mo>
<mml:msup>
<mml:mi>&#x03C3;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(4)</label></disp-formula>
<disp-formula id="S3.E5">
<mml:math id="M5">
<mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mi>I</mml:mi>
<mml:mrow>
<mml:mtext>blur</mml:mtext>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo rspace="5.8pt" stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:munder>
<mml:mstyle displaystyle="true"><mml:mo largeop="true" movablelimits="false" symmetric="true">&#x2211;</mml:mo></mml:mstyle>
<mml:mrow>
<mml:mi>u</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>v</mml:mi>
</mml:mrow>
</mml:munder>
<mml:mrow>
<mml:mi>G</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mi>u</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>v</mml:mi>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>I</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>-</mml:mo>
<mml:mi>u</mml:mi>
</mml:mrow>
<mml:mo>,</mml:mo>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>-</mml:mo>
<mml:mi>v</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(5)</label></disp-formula>
<p>Following noise attenuation, tumor boundary definition was selectively intensified using unsharp masking, a technique designed to accentuate high-frequency structural details without distorting underlying anatomy. In this stage, a smoothed version of the original image (<italic>I</italic><sub><italic>blur</italic></sub>) is first generated, after which edge information is isolated by subtracting the blurred component from the original signal. The detail layer is then reintegrated to produce a sharpened image, yielding clearer lesion contours and improved spatial discrimination while preserving tissue integrity.</p>
<disp-formula id="S3.E6">
<mml:math id="M6">
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:msub>
<mml:mi>I</mml:mi>
<mml:mrow>
<mml:mtext>detail</mml:mtext>
</mml:mrow>
</mml:msub>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mi>I</mml:mi>
<mml:mo>-</mml:mo>
<mml:msub>
<mml:mi>I</mml:mi>
<mml:mrow>
<mml:mtext>blur</mml:mtext>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(6)</label></disp-formula>
<disp-formula id="S3.E7">
<mml:math id="M7">
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:msub>
<mml:mi>I</mml:mi>
<mml:mrow>
<mml:mtext>sharp</mml:mtext>
</mml:mrow>
</mml:msub>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mi>I</mml:mi>
<mml:mo>+</mml:mo>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>&#x22C5;</mml:mo>
<mml:msub>
<mml:mi>I</mml:mi>
<mml:mrow>
<mml:mtext>detail</mml:mtext>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(7)</label></disp-formula>
<p>In this case, <italic>K</italic> represents a scalar value responsible for controlling the strength of boundary enhancement, which has a direct impact on boundary intensity and, consequently, on image spatial discrimination. However, this enhancement procedure alone yields only a moderate and somewhat blurry definition of boundaries. To further emphasize convergent structural details, second-order intensity variations were enhanced with Laplacian operators without compromising image spatial discrimination. In addition, to enhance image coherence and reduce irregularities in intensity distributions inside tumors, a morphological closing procedure was performed. Morphological closing is a combination of a dilation followed by an erosion with a given structuring element <italic>K</italic>, which, in this case, is elliptical, and fills in small cavities inside tumors, as follows:</p>
<disp-formula id="S3.E8">
<mml:math id="M8">
<mml:mrow>
<mml:mrow>
<mml:mi>I</mml:mi>
<mml:mo>&#x22C5;</mml:mo>
<mml:mpadded width="+3.3pt">
<mml:mi>K</mml:mi>
</mml:mpadded>
</mml:mrow>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>I</mml:mi>
<mml:mo>&#x2295;</mml:mo>
<mml:mi>K</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>&#x2296;</mml:mo>
<mml:mi>K</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(8)</label></disp-formula>
<disp-formula id="S3.E9">
<mml:math id="M9">
<mml:mrow>
<mml:mrow>
<mml:msup>
<mml:mo>&#x2207;</mml:mo>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x2061;</mml:mo>
<mml:mpadded width="+3.3pt">
<mml:mi>I</mml:mi>
</mml:mpadded>
</mml:mrow>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:msup>
<mml:mi>&#x2202;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>I</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:msup>
<mml:mi>x</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mfrac>
<mml:mo>+</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msup>
<mml:mi>&#x2202;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>I</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:msup>
<mml:mi>y</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(9)</label></disp-formula>
<disp-formula id="S3.E10">
<mml:math id="M10">
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:msub>
<mml:mi>I</mml:mi>
<mml:mrow>
<mml:mtext>enhanced</mml:mtext>
</mml:mrow>
</mml:msub>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mi>I</mml:mi>
<mml:mo>+</mml:mo>
<mml:mrow>
<mml:mi mathvariant="normal">&#x03B1;</mml:mi>
<mml:mo>&#x22C5;</mml:mo>
<mml:mrow>
<mml:msup>
<mml:mo>&#x2207;</mml:mo>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>I</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(10)</label></disp-formula>
<p>The value of &#x03B1; determines the degree to which edge information is highlighted, making it possible to adjust the importance of the structure without sharp edges being created artificially. The step-by-step process of improvement, noise removal, edge enhancement, and morphological optimization helps to achieve images with prominently improved quality concerning the clarity and definition of the tumor region. The whole preprocessing step makes it possible to increase the definition of the boundaries and the visibility of the tissue inside, achieving a representation that is well suited for the extraction of deep features.</p>
</sec>
</sec>
<sec id="S3.SS2">
<label>3.2</label>
<title>Model building</title>
<p>ResNet-50, despite its great deep feature extraction capability, has a high computational cost, feature redundancy, and no clear attention mechanism as its drawbacks. These drawbacks lead to a lesser extent of efficiency and less capability to bring out the tumor-relevant features in brain MRI images. The Ghost Module is integrated to tackle these issues by removing redundant feature maps and cutting down the computational load via a cheap feature generation. Simultaneously, the Efficient Channel Attention (ECA) technique is utilized to adaptively channel-wise features and boost the tumor&#x2019;s discriminative representations. The integrated structure allows the proposed architecture to deliver heightened efficiency, precision, and selectivity in features.</p>
<p>ResNet-50 is a deep convolutional architecture comprising fifty layers and is founded on the principle of residual learning, originally introduced to mitigate degradation and vanishing-gradient phenomena in very deep networks (<xref ref-type="bibr" rid="B29">Wen et al., 2020</xref>). Rather than learning a direct mapping <italic>H</italic>(<italic>x</italic>), each residual block is formulated to learn a residual function <italic>F</italic>(<italic>x</italic>), such that the block output is defined as</p>
<disp-formula id="S3.E11">
<mml:math id="M11">
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:mi>y</mml:mi>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mi>x</mml:mi>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(11)</label></disp-formula>
<p>where <italic>x</italic> denotes the input feature map and <italic>F</italic>(<italic>x</italic>) represents the transformation learned by the stacked convolutional layers. The identity shortcut connection enables gradients to propagate directly through the network during backpropagation, stabilizing optimization and facilitating the effective training of deep architectures (<xref ref-type="bibr" rid="B19">Liu et al., 2023</xref>).</p>
<p>Each bottleneck residual block in ResNet-50 consists of a sequence of three convolutional layers arranged in a 1&#x00D7;1, 3&#x00D7;3, and 1&#x00D7;1 configuration. The initial 1&#x00D7;1 convolution performs channel dimensionality reduction, the 3&#x00D7;3 convolution extracts spatial features, and the final 1&#x00D7;1 convolution restores the original channel depth. Given an input tensor X &#x2208; &#x211D;<sup><italic>H</italic>&#x00D7;<italic>W</italic>&#x00D7;<italic>C</italic></sup>, the bottleneck transformation is expressed as</p>
<disp-formula id="S3.E12">
<mml:math id="M12">
<mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mtext>X</mml:mtext>
<mml:mo rspace="5.8pt" stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mtext>W</mml:mtext>
<mml:mn>3</mml:mn>
</mml:msub>
<mml:mo>&#x002A;</mml:mo>
<mml:mi mathvariant="normal">&#x03C3;</mml:mi>
</mml:mrow>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mtext>W</mml:mtext>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:mo>&#x002A;</mml:mo>
<mml:mi mathvariant="normal">&#x03C3;</mml:mi>
</mml:mrow>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mtext>W</mml:mtext>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>&#x002A;</mml:mo>
<mml:mtext>X</mml:mtext>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mrow>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:math>
<label>(12)</label></disp-formula>
<p>where W<sub>1</sub>, W<sub>2</sub>, and W<sub>3</sub> denote convolutional kernels, &#x002A; represents convolution, and &#x03C3;(&#x22C5;) is the ReLU activation. The block output is obtained via residual summation:</p>
<disp-formula id="S3.E13">
<mml:math id="M13">
<mml:mrow>
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:mtext>Y</mml:mtext>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mtext>X</mml:mtext>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mi mathvariant="normal">X</mml:mi>
</mml:mrow>
</mml:mrow>
<mml:mo>.</mml:mo>
</mml:mrow>
</mml:math>
<label>(13)</label></disp-formula>
<p>Batch normalization is employed after every single convolutional layer to normalize feature distributions and accelerate convergence. Across its successive stages, ResNet-50 gradually encodes visual information, starting with low-level edges and intensity gradients, advancing to mid-level texture and structural patterns, and culminating in high-level semantic representations associated with tumor morphology and spatial extent. For an input MRI image I (<xref ref-type="bibr" rid="B31">Yadav et al., 2024</xref>), the backbone produces a deep feature tensor</p>
<disp-formula id="S3.E14">
<mml:math id="M14">
<mml:mrow>
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:mtext>F</mml:mtext>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mtext>ResNet</mml:mtext>
<mml:mo>-</mml:mo>
<mml:mrow>
<mml:mn>50</mml:mn>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mtext>I</mml:mtext>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:mrow>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:math>
<label>(14)</label></disp-formula>
<p>where F &#x2208; &#x211D;<sup><italic>H</italic>&#x2032;&#x00D7;<italic>W</italic>&#x2032;&#x00D7;<italic>C</italic>&#x2032;</sup> serves as the foundational representation for subsequent feature refinement and attention mechanisms.</p>
<p>As illustrated in <xref ref-type="fig" rid="F3">Figure 3</xref>, the network begins with a 7&#x00D7;7 convolution followed by 3&#x00D7;3 max pooling to extract initial features. It then traverses four residual stages (Stages 1&#x2013;4), each composed of multiple bottleneck blocks with skip connections to preserve information flow and suppress gradient attenuation. Global average pooling precedes the output layer, which generates the final classification prediction.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption><p>Detailed schematic of the ResNet-50 backbone, illustrating progressive feature extraction from MRI input to output prediction.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1778376-g003.tif">
<alt-text content-type="machine-generated">Diagram illustrating a convolutional neural network workflow for image processing, showing sequential stages with convolutional layers, max pooling, average pooling, and labeled transitions from input medical images to output predictions.</alt-text>
</graphic>
</fig>
<sec id="S3.SS2.SSS1">
<label>3.2.2</label>
<title>Ghost module</title>
<p>The Ghost Module is introduced to specifically handle the high computational complexity and feature redundancy caused by traditional convolutional layers (<xref ref-type="bibr" rid="B7">Chen et al., 2022</xref>). The key idea behind the Ghost Module is that most generated feature maps by traditional convolutional layers are strongly correlated and, hence, unnecessary to calculate through the convolutional operation (<xref ref-type="bibr" rid="B11">Han et al., 2020</xref>). Contrary to traditional convolutional layers, where the entire result is computed, the Ghost Module proposes the generation process to consist of two tasks: the identification of intrinsic feature maps and the production of the rest by low-cost transformations.</p>
<p>Formally, given an input feature tensor, a standard convolution produces an output <bold>Y</bold> &#x2208; &#x211D;<sup><italic>H</italic>&#x00D7;<italic>W</italic>&#x00D7;<italic>C</italic></sup> via the operation</p>
<disp-formula id="S3.E15">
<mml:math id="M15">
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:mi>Y</mml:mi>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mi>W</mml:mi>
<mml:mo>&#x002A;</mml:mo>
<mml:mi>X</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(15)</label></disp-formula>
<p>where W denotes the convolutional kernel and &#x002A; represents convolution. This operation becomes increasingly expensive as the number of output channels <italic>C</italic><sub><italic>out</italic></sub> grows. In contrast, the Ghost Module first generates a reduced set of intrinsic features F<sub><italic>p</italic></sub> using a lightweight convolutional kernel W<sub><italic>p</italic></sub>, producing <italic>C</italic><sub><italic>int</italic></sub> channels, where <italic>C</italic><sub><italic>int</italic></sub> &#x226A; <italic>C</italic><sub><italic>out</italic></sub>:</p>
<disp-formula id="S3.E16">
<mml:math id="M16">
<mml:mrow>
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:msub>
<mml:mi>F</mml:mi>
<mml:mi>p</mml:mi>
</mml:msub>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mi>p</mml:mi>
</mml:msub>
<mml:mo>&#x002A;</mml:mo>
<mml:mi>X</mml:mi>
</mml:mrow>
</mml:mrow>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:math>
<label>(16)</label></disp-formula>
<p>The remaining feature maps, referred to as ghost features, are obtained by applying inexpensive linear transformations &#x03A6;(&#x22C5;), typically implemented as depthwise convolutions:</p>
<disp-formula id="S3.E17">
<mml:math id="M17">
<mml:mrow>
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:msub>
<mml:mi>F</mml:mi>
<mml:mi>g</mml:mi>
</mml:msub>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mi mathvariant="normal">&#x03A6;</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:msub>
<mml:mi>F</mml:mi>
<mml:mi>p</mml:mi>
</mml:msub>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:mrow>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:math>
<label>(17)</label></disp-formula>
<p>The intrinsic and ghost features are then concatenated,</p>
<disp-formula id="S3.E18">
<mml:math id="M18">
<mml:mrow>
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:msub>
<mml:mi>F</mml:mi>
<mml:mrow>
<mml:mtext>ghost</mml:mtext>
</mml:mrow>
</mml:msub>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">[</mml:mo>
<mml:msub>
<mml:mi>F</mml:mi>
<mml:mi>p</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>F</mml:mi>
<mml:mi>g</mml:mi>
</mml:msub>
<mml:mo stretchy="false">]</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:math>
<label>(18)</label></disp-formula>
<p>and subsequently fused using a 1&#x00D7;1 convolution:</p>
<disp-formula id="S3.E19">
<mml:math id="M19">
<mml:mrow>
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:mi>Y</mml:mi>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>W</mml:mi>
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x00D7;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x002A;</mml:mo>
<mml:msub>
<mml:mi>F</mml:mi>
<mml:mrow>
<mml:mtext>ghost</mml:mtext>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mrow>
<mml:mo>.</mml:mo>
</mml:mrow>
</mml:math>
<label>(19)</label></disp-formula>
<p>It significantly alleviates the floating-point calculations and the number of parameters compared with the conventional convolutions while maintaining the representational ability (<xref ref-type="bibr" rid="B7">Chen et al., 2022</xref>). In the brain MRI classification problem, the Ghost Module helps to efficiently suppress the redundant feature propagation introduced by the ResNet-50 backbone network, allowing the model to pay more attention to the tumor-related features.</p>
</sec>
<sec id="S3.SS2.SSS2">
<label>3.2.3</label>
<title>Efficient channel attention</title>
<p>To further refine features, the proposed architecture incorporates the Efficient Channel Attention (ECA) module (<xref ref-type="bibr" rid="B28">Wang et al., 2020</xref>). Unlike conventional attention mechanisms that depend on dimensionality reduction and fully connected transformations, ECA adopts a lightweight, channel-wise strategy that preserves the original channel dimensionality while capturing inter-channel dependencies at minimal computational cost. This design choice is particularly advantageous in medical imaging, where subtle intensity and texture variations carry diagnostic significance.</p>
<p>Given an input feature tensor X &#x2208; &#x211D;<sup><italic>H</italic>&#x00D7;<italic>W</italic>&#x00D7;<italic>C</italic></sup>, ECA first applies Global Average Pooling (GAP) to condense spatial information into a compact channel descriptor (<xref ref-type="bibr" rid="B5">Bakr et al., 2022</xref>). For each channel <italic>c</italic>, the descriptor is computed as</p>
<disp-formula id="S3.E20">
<mml:math id="M20">
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:msub>
<mml:mi>z</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mfrac>
<mml:mn>1</mml:mn>
<mml:mrow>
<mml:mi>H</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>W</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:munderover>
<mml:mstyle displaystyle="true"><mml:mo largeop="true" movablelimits="false" symmetric="true">&#x2211;</mml:mo></mml:mstyle>
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:mi>i</mml:mi>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>H</mml:mi>
</mml:munderover>
<mml:munderover>
<mml:mstyle displaystyle="true"><mml:mo largeop="true" movablelimits="false" symmetric="true">&#x2211;</mml:mo></mml:mstyle>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mi>W</mml:mi>
</mml:munderover>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>c</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mpadded width="+3.3pt">
<mml:mi>c</mml:mi>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>,</mml:mo>
<mml:mn>2</mml:mn>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="normal">&#x2026;</mml:mi>
<mml:mo>.</mml:mo>
<mml:mo>.</mml:mo>
<mml:mo>,</mml:mo>
<mml:mi>C</mml:mi>
</mml:mrow>
</mml:math>
<label>(20)</label></disp-formula>
<p>forming the channel vector</p>
<disp-formula id="S3.E21">
<mml:math id="M21">
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:mtext>z</mml:mtext>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">[</mml:mo>
<mml:msub>
<mml:mi>z</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>z</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="normal">&#x2026;</mml:mi>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi>z</mml:mi>
<mml:mi>C</mml:mi>
</mml:msub>
<mml:mo stretchy="false">]</mml:mo>
</mml:mrow>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mi>&#x211D;</mml:mi>
<mml:mi>C</mml:mi>
</mml:msup>
<mml:mo>.</mml:mo>
</mml:mrow>
</mml:math>
<label>(21)</label></disp-formula>
<p>Rather than employing fully connected layers, ECA models local cross-channel interactions using a one-dimensional convolution with an adaptively determined kernel size <italic>k</italic>. The kernel size is defined as</p>
<disp-formula id="S3.E22">
<mml:math id="M22">
<mml:mrow>
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:mi>k</mml:mi>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mo stretchy="false">|</mml:mo>
<mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mtext>log</mml:mtext>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>C</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>&#x2062;</mml:mo>
<mml:mi mathvariant="normal">&#x03B3;</mml:mi>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mi>b</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">|</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mtext>odd</mml:mtext>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:math>
<label>(22)</label></disp-formula>
<p>where &#x03B3; and <italic>b</italic> control the receptive field and |&#x22C5;|<sub>odd</sub> enforces an odd kernel size to maintain symmetric convolution. The resulting attention weights are passed through a sigmoid activation and applied to rescale the original feature tensor:</p>
<disp-formula id="S3.E23">
<mml:math id="M23">
<mml:mrow>
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:msubsup>
<mml:mi>x</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>c</mml:mi>
</mml:mrow>
<mml:msup>
<mml:mi/>
<mml:mo>&#x2032;</mml:mo>
</mml:msup>
</mml:msubsup>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mi>a</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
<mml:mo>&#x22C5;</mml:mo>
<mml:msub>
<mml:mi>x</mml:mi>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>j</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>c</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mrow>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:math>
<label>(23)</label></disp-formula>
<p>where <italic>a<sub>c</sub></italic> denotes the learned importance of the <italic>c</italic>-th channel.</p>
<p>This results in adaptive reweighting, whereby the process enhances tumor-related channels and suppresses the contributions of less informative background channels. More importantly, ECA achieves this at a computational cost lower than that of a squeeze-and-excitation (SE) block, which is valuable when precision is a crucial concern in resolution-heavy medical image classification problems.</p>
</sec>
<sec id="S3.SS2.SSS3">
<label>3.2.4</label>
<title>Integrated model architecture and training framework</title>
<p>In particular, the proposed architecture is built through the deliberate integration of three complementary components: a ResNet-50 backbone, a Ghost feature generation module, and an Efficient Channel Attention mechanism. Such a design addresses the main limitations that are conventionally associated with standard deep convolutional networks in the context of medical image classification, such as high computational cost, feature redundancy, and lack of explicit attention modeling. ResNet-50 was chosen as the primary feature extractor precisely for its recognized capability to learn hierarchical spatial and semantic representations. However, direct utilization of the backbone provides dense feature maps with significant redundancy and low interpretability.</p>
<p>For the mitigation of these issues, the output feature maps from ResNet-50 are transformed by the Ghost Module. This module reformulates the convolution process by forming a small set of intrinsic feature maps and deriving the remainder from them through low-cost linear transformations. This practice substantially reduces the generation of unnecessary features while retaining expressiveness, resulting in a markedly lower computational cost without impairing discrimination. After Ghost-based refinement, channel-wise feature recalibration is carried out through Efficient Channel Attention. Whereas ResNet-50 inherently treats all channels equally, ECA applies data-driven channel weighting to emphasize channels associated with tumor-related features while down-weighting channels dominated by background information.</p>
<p>The refined representations obtained then undergo global average pooling and then pass through a fully connected classification head, forming an end-to-end trainable system. Optimization of the models has been conducted using supervised mini-batch gradient descent with Adam optimization to ensure convergent performance. Dropout regularization has also been adopted to handle overfitting issues. Combining all these modules together, this comprehensive system addresses the primary weakness of ResNet-50, which consists of computational inefficiency, feature redundancy, and poor attention sensitivity, with a fair balance between accuracy, robustness, and computational efficiency.</p>
<p>In the proposed architecture, feature embeddings are obtained from the high-level convolutional feature maps produced after the Ghost-ResNet backbone enhanced with Efficient Channel Attention blocks. These embeddings represent compact, high-dimensional representations of MRI scans, capturing both spatial and channel-wise discriminative information. Global Average Pooling (GAP) is applied to transform the feature maps into fixed-length embedding vectors, which are then fed into the final classification layers for multi-class tumor prediction.</p>
</sec>
</sec>
<sec id="S3.SS3">
<label>3.3</label>
<title>Implementation and training details</title>
<p>The proposed Efficient Attention-Based Ghost-ResNet model was implemented using the PyTorch deep learning framework and trained under unified experimental settings to ensure reproducibility and fair comparison with baseline models. Training was conducted for 10 epochs using the Adam optimizer with an initial learning rate of 1 &#x00D7; 10<sup>&#x2013;4</sup> and a batch size of 32, while categorical cross-entropy was employed as the loss function for multi-class classification. To reduce overfitting, a weight decay of 1 &#x00D7; 10<sup>&#x2013;5</sup> and a dropout rate of 0.5 in the fully connected layers were applied. The network was initialized with ImageNet-pretrained weights, where early convolutional layers were initially frozen to preserve generic feature representations, followed by gradual unfreezing of higher-level layers for fine-tuning using a reduced learning rate. A learning rate scheduler was utilized to decrease the learning rate upon stagnation of the validation loss. All experiments were performed using a fixed random seed to ensure consistent results and were executed on a workstation equipped with an NVIDIA GPU and 32 GB of RAM.</p>
</sec>
<sec id="S3.SS4">
<label>3.4</label>
<title>Evaluation metrics and statistical analysis</title>
<p>The performance of the models has been evaluated using a variety of statistics to ensure that the models are evaluated effectively and fairly (<xref ref-type="bibr" rid="B12">Hossin and Sulaiman, 2015</xref>). Finally, the proposed model has been validated on a test dataset containing MRI scans that are not used during training to prevent any leakage of information. Let TP, TN, FP, and FN represent the true positives, true negatives, false positives, and false negatives, respectively.</p>
<p>Overall classification accuracy was adopted as the primary metric, as it reflects the proportion of correctly classified instances across all tumor categories:</p>
<disp-formula id="S3.E24">
<mml:math id="M24">
<mml:mrow>
<mml:mrow>
<mml:mi>A</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>c</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>c</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>u</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>r</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>a</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>c</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mpadded width="+3.3pt">
<mml:mi>y</mml:mi>
</mml:mpadded>
</mml:mrow>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(24)</label></disp-formula>
<p>While accuracy provides a global measure of correctness, it may obscure class-specific behavior, particularly in multi-class or imbalanced settings. To address this limitation, precision and sensitivity were additionally reported to capture predictive reliability and detection capability. Precision quantifies the proportion of correctly identified positive cases among all predicted positives:</p>
<disp-formula id="S3.E25">
<mml:math id="M25">
<mml:mrow>
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>r</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>e</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>c</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>o</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mpadded width="+3.3pt">
<mml:mi>n</mml:mi>
</mml:mpadded>
</mml:mrow>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(25)</label></disp-formula>
<p>Sensitivity, also referred to as recall, measures the model&#x2019;s ability to correctly identify true positive cases:</p>
<disp-formula id="S3.E26">
<mml:math id="M26">
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:mtext>Sensitivity</mml:mtext>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(26)</label></disp-formula>
<p>Specificity was included to evaluate correct identification of negative cases:</p>
<disp-formula id="S3.E27">
<mml:math id="M27">
<mml:mrow>
<mml:mpadded width="+3.3pt">
<mml:mtext>Specificity</mml:mtext>
</mml:mpadded>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(27)</label></disp-formula>
<p>Because precision and sensitivity capture complementary aspects of performance, the F1-score was computed as their harmonic mean to provide a balanced summary metric:</p>
<disp-formula id="S3.E28">
<mml:math id="M28">
<mml:mrow>
<mml:mrow>
<mml:mi>F</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mpadded width="+3.3pt">
<mml:mn>1</mml:mn>
</mml:mpadded>
</mml:mrow>
<mml:mo rspace="5.8pt">=</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:mo>&#x22C5;</mml:mo>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>r</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>e</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>c</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>o</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>n</mml:mi>
</mml:mrow>
<mml:mo>&#x22C5;</mml:mo>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>e</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>c</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>a</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>l</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>l</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>r</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>e</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>c</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>s</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>o</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>n</mml:mi>
</mml:mrow>
<mml:mo>+</mml:mo>
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>e</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>c</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>a</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>l</mml:mi>
<mml:mo>&#x2062;</mml:mo>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
<label>(28)</label></disp-formula>
</sec>
</sec>
<sec id="S4" sec-type="results">
<label>4</label>
<title>Results</title>
<p>The proposed model was fine-tuned using transfer learning. The initial layers of the backbone network were initialized with ImageNet-pretrained weights and frozen during early training stages to preserve low-level feature representations. Subsequently, deeper layers were gradually unfrozen and fine-tuned using a lower learning rate to adapt the model to MRI-specific features. Adam optimizer was employed with a learning rate scheduling strategy to ensure stable convergence. This progressive fine-tuning approach significantly improved convergence speed and classification performance.</p>
<p>The proposed architecture was evaluated on the Bangladesh Brain Cancer MRI Dataset, which consists of 6,056 images distributed across three tumor categories: glioma (2,004 images), meningioma (2,004 images), and pituitary tumors (2,048 images). The dataset was subdivided into training, validation and testing subsets using an 80/10/10 split. Model optimization was accomplished over 10 epochs, utilizing the Adam optimizer with a learning rate of 1&#x00D7;10<sup>&#x2212;4</sup>. Performance was assessed using accuracy, precision, sensitivity (recall), specificity, and F1-score to ensure a comprehensive and meaningful evaluation of classification behavior.</p>
<sec id="S4.SS1">
<label>4.1</label>
<title>Comparative analysis against transfer-learning baselines</title>
<p>The comparative results summarized in <xref ref-type="table" rid="T2">Table 2</xref> and visualized in <xref ref-type="fig" rid="F4">Figure 4</xref> can be interpreted through the lens of Efficiency Accuracy Trade-off Theory, which posits that predictive performance in deep learning does not increase linearly with network depth or parameter count once representational saturation is reached. VGG16 occupies a middling position, achieving 91.58% accuracy with precision and sensitivity values of 91.72 and 91.57%, respectively. It serves, in effect, as a functional baseline. Increasing architectural depth in isolation does not appear to confer an advantage: VGG19 registers a decline to 87.29% accuracy and 89.37% precision, which suggests that deeper stacks, absent task-aligned refinements, do not necessarily enhance discrimination in MRI-based tumor analysis. This pattern becomes more pronounced with ResNet50.</p>
<table-wrap position="float" id="T2">
<label>TABLE 2</label>
<caption><p>Comparative performance analysis of brain tumor classification models.</p></caption>
<table cellspacing="5" cellpadding="5" frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="center">Model</th>
<th valign="top" align="center">Accuracy (%)</th>
<th valign="top" align="center">Precision (%)</th>
<th valign="top" align="center">Sensitivity (%)</th>
<th valign="top" align="center">Specificity (%)</th>
<th valign="top" align="center">F1-Score (%)</th>
<th valign="top" align="center">&#x0394; Accuracy vs. baseline</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="center">ResNet50</td>
<td valign="top" align="center">76.07</td>
<td valign="top" align="center">76.28</td>
<td valign="top" align="center">75.98</td>
<td valign="top" align="center">88.03</td>
<td valign="top" align="center">75.86</td>
<td valign="top" align="center">&#x2013;20.13%</td>
</tr>
<tr>
<td valign="top" align="center">VGG19</td>
<td valign="top" align="center">87.29</td>
<td valign="top" align="center">89.37</td>
<td valign="top" align="center">87.33</td>
<td valign="top" align="center">93.67</td>
<td valign="top" align="center">87.53</td>
<td valign="top" align="center">&#x2013;8.91%</td>
</tr>
<tr>
<td valign="top" align="center">VGG16</td>
<td valign="top" align="center">91.58</td>
<td valign="top" align="center">91.72</td>
<td valign="top" align="center">91.57</td>
<td valign="top" align="center">95.79</td>
<td valign="top" align="center">91.6</td>
<td valign="top" align="center">&#x2013;4.62%</td>
</tr>
<tr>
<td valign="top" align="center">Xception</td>
<td valign="top" align="center">93.89</td>
<td valign="top" align="center">94.11</td>
<td valign="top" align="center">93.9</td>
<td valign="top" align="center">96.95</td>
<td valign="top" align="center">93.93</td>
<td valign="top" align="center">&#x2013;2.31%</td>
</tr>
<tr>
<td valign="top" align="center">DenseNet121 (Baseline)</td>
<td valign="top" align="center">96.2</td>
<td valign="top" align="center">96.37</td>
<td valign="top" align="center">96.23</td>
<td valign="top" align="center">98.1</td>
<td valign="top" align="center">96.22</td>
<td valign="top" align="center">Baseline</td>
</tr>
<tr>
<td valign="top" align="center">Proposed model</td>
<td valign="top" align="center"><bold>97.85</bold></td>
<td valign="top" align="center">97.84</td>
<td valign="top" align="center"><bold>97.85</bold></td>
<td valign="top" align="center"><bold>98.93</bold></td>
<td valign="top" align="center"><bold>97.85</bold></td>
<td valign="top" align="center"><bold>+1.65%</bold></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn><p>The bold values indicate the best performance achieved among all compared models for each evaluation metric (Accuracy, Precision, Sensitivity, Specificity, and F1-Score). These values highlight the superior performance of the proposed Efficient Attention-Based Ghost-ResNet model compared to the baseline and other existing models.</p></fn>
</table-wrap-foot>
</table-wrap>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption><p>Comparative assessment of classification performance across baseline and proposed models using clinically relevant evaluation metrics.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1778376-g004.tif">
<alt-text content-type="machine-generated">Grouped bar charts compare six models across accuracy, precision, sensitivity, specificity, and F1-score using horizontal bars. The Proposed Model consistently achieves the highest percentage near one hundred in all metrics, while ResNet50 scores noticeably lower across all panels.</alt-text>
</graphic>
</fig>
<p>Model complexity was assessed by comparing the total number of trainable parameters and floating-point operations (FLOPs). All comparative models were implemented under identical experimental settings to ensure fairness. The proposed model with ECA demonstrated a significantly lower parameter count compared to standard ResNet and DenseNet architectures while achieving superior classification accuracy, highlighting its efficiency-oriented design.</p>
<p>Despite its reputation in natural image recognition, performance here contracts to 76.07% accuracy and 76.28% precision, a result which seems to reflect a mismatch between generic residual pathways and the morphological variability inherent to brain tumors. This is clearly reflected in the relatively weak performance of ResNet50, which attains only 76.07% accuracy despite its depth and residual connectivity.</p>
<p>Such findings imply that architectural depth is not enough to model the subtle, heterogeneous appearance of brain tumors in MRI in the absence of explicit consideration of feature selectivity and redundancy control. Conversely, architectures designed around efficient feature utilization, such as Xception and DenseNet121, yielded much stronger performance. These results reinforce the observation that a compact and well-structured feature representation is better suited for medical image classification tasks. The proposed model extends this pattern by explicitly controlling feature redundancy through Ghost modules while reinforcing channel discriminative capacity with Efficient Channel Attention. This has collectively resulted in the best performance on all metrics, reaching 97.85% accuracy, 97.85% sensitivity, and 98.93% specificity, which gives an absolute accuracy gain of 1.65% over the strongest baseline model, DenseNet121. Based on the F1-score of 97.85%, the improvement does not arise from class-selective bias.</p>
<p><xref ref-type="fig" rid="F5">Figure 5</xref> presents a box plot that visualizes these relationships, reinforcing the interpretation drawn from the tabulated evidence. The proposed model clearly defines the upper envelope of performance. It achieves the highest median F1-score (&#x2248;0.98) while simultaneously exhibiting the narrowest interquartile range and minimal overlap with competing methods. This concentration indicates not only superior average performance but also strong resistance to fold-specific fluctuations. In practical terms, the proposed architecture delivers the most reliable balance between precision and sensitivity under repeated data partitioning, a property of relevance for clinical deployment where performance instability can undermine diagnostic trust.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption><p>Boxplot of performance distribution observed across five widely adopted transfer-learning architectures.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1778376-g005.tif">
<alt-text content-type="machine-generated">Box plot displaying F1-scores from five-fold cross-validation across six models: VGG16, VGG19, ResNet50, Xception, DenseNet121, and Proposed. Proposed model demonstrates the highest median F1-score, followed by DenseNet121 and Xception.</alt-text>
</graphic>
</fig>
<p>Superior structural designs, especially Xception and DenseNet121, achieve stronger and more stable performance. DenseNet121 provides a strong baseline with an accuracy of 96.20% and an F1-score of 96.22%, reflecting the benefits of feature reuse and dense connectivity. However, the proposed model goes beyond this baseline by an absolute accuracy margin of 1.65% while also achieving uniformly higher precision, sensitivity, specificity, and F1-score. Essentially, this improvement is consistent across all evaluation metrics, demonstrating a genuine enhancement in diagnostic reliability rather than isolated optimization of a single performance indicator. Notably, the higher specificity indicates a markedly reduced false-positive rate, a critical requirement in clinical neuro-oncology where diagnostic errors can lead to avoidable interventions and patient anxiety. Similarly, these findings support the core assumptions of efficiency-driven learning and demonstrate that meaningful performance gains arise not from increased architectural complexity, but from principled alignment between feature generation, attention selectivity, and task-specific imaging characteristics. From a deployment perspective, this design paradigm is especially beneficial in real-world settings where computational resources, latency constraints, and accuracy must all be considered.</p>
</sec>
<sec id="S4.SS2">
<label>4.2</label>
<title>Training dynamics and convergence behavior</title>
<p><xref ref-type="fig" rid="F6">Figure 6</xref> illustrates the optimization process of the Enhanced MEGS-Net model over ten training epochs and displays an effective, well-regulated training process. The accuracy curves show an initial adjustment stage in which validation accuracy varies from 92.2 to 97.9%, while training accuracy rises markedly from 86 to 97%. The sharp change between epochs 1 and 3 is not surprising, as it reflects the concomitant fitting of the training data and the onset of generalization on the validation data during this stage.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption><p>Training and Validation Dynamics - proposed model.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1778376-g006.tif">
<alt-text content-type="machine-generated">Side-by-side line charts compare the proposed model&#x2019;s performance over ten epochs. The left chart shows increasing training and validation accuracy, both above 0.90, while the right chart displays decreasing training and validation loss, both below 0.40.</alt-text>
</graphic>
</fig>
<p>Both curves converge to a narrow, stable band; validation accuracy consistently approaches the upper 97&#x2013;98% range, while training accuracy continues to improve incrementally&#x2014;a proxy for controlled convergence rather than memorization. Training loss falls off smoothly from around 0.38 to around 0.08, which is characteristic of a well-conditioned optimization landscape. On the other hand, the validation loss drops markedly during the initial epochs&#x2014;from about 0.23 to 0.13&#x2014;then enters a low-variance regime, modestly fluctuating between 0.06 and 0.13 thereafter. By the final epochs, both losses converge to comparably low values in approximately the same range of 0.05&#x2013;0.08, indicating that the network reaches a balanced fit. The absence of widening gaps between training and validation loss toward the later stages implies that overfitting remains limited, with the learned representations still retaining strong generalization capacity across data splits.</p>
</sec>
<sec id="S4.SS3">
<label>4.3</label>
<title>Confusion matrix analysis and per-class performance</title>
<p>The confusion matrices summarized in <xref ref-type="fig" rid="F7">Figure 7</xref> provide a granular view of class-wise behavior across all evaluated architectures and offer a more stringent test of clinical reliability than aggregate metrics alone. For the proposed model, classification accuracy reaches 97.01% for glioma cases (195 correctly identified out of 201), 98.50% for meningioma (197/200), and 98.05% for pituitary tumors (201/205). Misclassification events are sparse and lack any systematic cross-class bias, a pattern that suggests the learned representations are not merely optimized for global accuracy but are also well aligned with tumor-specific imaging signatures.</p>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption><p>Comparative Confusion Matrices for Brain Tumor Classification Models.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1778376-g007.tif">
<alt-text content-type="machine-generated">Grid of six confusion matrix heatmaps compares brain tumor classification models: VGG16, VGG19, ResNet50, Xception, DenseNet121, and Proposed Model. Axes label true and predicted classes: Glioma, Meningioma, and Pituitary. Numerical values indicate correct and misclassified image counts for each class. Yellow indicates correct classifications, with brighter squares along diagonals suggesting better model accuracy.</alt-text>
</graphic>
</fig>
<p>This observation becomes clearer when the diagonal structure of the matrices is examined. All models exhibit some degree of diagonal dominance, yet the proposed architecture displays the tightest concentration along the principal diagonal and the lowest incidence of off-diagonal leakage. In contrast, baseline networks including VGG16, VGG19, ResNet50, Xception, and DenseNet121 show more frequent class confusion, particularly in tumor pairs with overlapping radiological appearance. The distinction between glioma and meningioma, a well-known diagnostic challenge due to shared intensity patterns and boundary ambiguity, is handled with noticeably greater consistency by the proposed model.</p>
<p>Such consistency is unlikely to be coincidental. Rather, it is likely indicative of the combined impact of redundancy-aware feature generation and channel re-weighting based on Efficient Channel Attention, which together have the net effect of enhancing morphological feature detection while suppressing irrelevant background patterns. Moreover, the reduction of inter-class ambiguity, which is of considerable consequence in clinical practice, further underscores the utility of the new architectural design.</p>
</sec>
<sec id="S4.SS4">
<label>4.4</label>
<title>Ablation study of ghost modules and efficient channel attention</title>
<p>To assess the individual contribution of each architectural component, a comprehensive ablation study was conducted using four model configurations: (1) the baseline ResNet50, (2) ResNet50 augmented with Ghost modules, (3) ResNet50 enhanced with Efficient Channel Attention (ECA), and (4) the full proposed model integrating both Ghost modules and ECA. All models were trained using the same preprocessing pipeline, dataset split, optimization strategy, and hyperparameters to ensure a fair comparison.</p>
<p>The results in <xref ref-type="table" rid="T3">Table 3</xref> show that both Ghost and ECA modules significantly improve performance compared to the base ResNet50 model. The introduction of Ghost modules leads to a substantial accuracy gain by reducing feature redundancy and improving parameter efficiency. Incorporating ECA alone yields the highest classification performance, highlighting the importance of channel-wise feature recalibration for discriminative MRI feature learning. When both components are integrated, the proposed model achieves a strong balance between accuracy and computational efficiency, slightly trading peak accuracy for reduced representational overhead and improved efficiency.</p>
<table-wrap position="float" id="T3">
<label>TABLE 3</label>
<caption><p>Ablation study evaluating the impact of ghost modules and efficient channel attention (ECA) on ResNet50 performance.</p></caption>
<table cellspacing="5" cellpadding="5" frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="center">Model</th>
<th valign="top" align="center">Accuracy (%)</th>
<th valign="top" align="center">Precision (%)</th>
<th valign="top" align="center">Recall (%)</th>
<th valign="top" align="center">F1-Score (%)</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="center">ResNet50</td>
<td valign="top" align="center">76.07</td>
<td valign="top" align="center">76.28</td>
<td valign="top" align="center">75.98</td>
<td valign="top" align="center">75.86</td>
</tr>
<tr>
<td valign="top" align="center">ResNet50-Ghost</td>
<td valign="top" align="center">96.69</td>
<td valign="top" align="center">96.85</td>
<td valign="top" align="center">96.53</td>
<td valign="top" align="center">96.69</td>
</tr>
<tr>
<td valign="top" align="center">ResNet50-ECA</td>
<td valign="top" align="center">98.34</td>
<td valign="top" align="center">98.34</td>
<td valign="top" align="center">98.34</td>
<td valign="top" align="center">98.34</td>
</tr>
<tr>
<td valign="top" align="center">ResNet50-Ghost-ECA</td>
<td valign="top" align="center">97.85</td>
<td valign="top" align="center">97.84</td>
<td valign="top" align="center">97.85</td>
<td valign="top" align="center">97.85</td>
</tr>
</tbody>
</table></table-wrap>
</sec>
<sec id="S4.SS5">
<label>4.5</label>
<title>Multi-class performance evaluation</title>
<p>To provide a comprehensive evaluation of the proposed model in the multi-class setting, class-wise precision, recall, and F1-score were computed for each tumor category, as summarized in <xref ref-type="table" rid="T4">Table 4</xref>. This detailed reporting enables clearer interpretation of model behavior across different tumor types and avoids reliance on aggregated performance alone.</p>
<table-wrap position="float" id="T4">
<label>TABLE 4</label>
<caption><p>Class-wise precision, recall, and F1-score for the proposed model.</p></caption>
<table cellspacing="5" cellpadding="5" frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="center">Class</th>
<th valign="top" align="center">Precision (%)</th>
<th valign="top" align="center">Recall (%)</th>
<th valign="top" align="center">F1-Score (%)</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="center">Brain_glioma</td>
<td valign="top" align="center">98.98</td>
<td valign="top" align="center">97.01</td>
<td valign="top" align="center">97.99</td>
</tr>
<tr>
<td valign="top" align="center">Brain_menin</td>
<td valign="top" align="center">96.10</td>
<td valign="top" align="center">98.50</td>
<td valign="top" align="center">97.28</td>
</tr>
<tr>
<td valign="top" align="center">Brain_tumor</td>
<td valign="top" align="center">98.53</td>
<td valign="top" align="center">98.05</td>
<td valign="top" align="center">98.29</td>
</tr>
</tbody>
</table></table-wrap>
</sec>
</sec>
<sec id="S5" sec-type="discussion">
<label>5</label>
<title>Discussion</title>
<sec id="S5.SS1">
<label>5.1</label>
<title>Performance gains and statistical relevance</title>
<p>Analysis of the results obtained shows that this gain is mostly made possible by two significant factors: the use of Ghost Modules and ECA on top of ResNet50. More specifically, the use of Ghost modules makes it possible to achieve greater efficiency in feature extraction by avoiding redundant computations while, at the same time, maintaining a diverse feature space. Meanwhile, ECA further refines the model&#x2019;s focus by emphasizing informative channels and de-emphasizing less relevant ones, thereby resulting in greater effectiveness in feature extraction.</p>
<p>The observed gain, an absolute accuracy improvement of 1.65% over DenseNet121 (from 96.20 to 97.85%), is more than a marginal improvement: it represents a significant reduction in diagnostic error. In a practical clinical setup involving 1,000 patients scanned with MRI, this would lead to a reduction in misclassified cases from approximately 38 patients to 22 patients (error rate from 3.8 to 2.20%). Practically speaking, this translates to a relative error reduction of approximately 42%, an improvement that is not insignificant within oncology.</p>
<p>The high sensitivity of 97.85% indicates that, out of every 1,000 tumor cases, close to 979 are correctly identified, thereby ensuring that missed cases are minimized. Equally important is the high specificity of 98.93%, which indicates a strong likelihood of being correct when an image is declared normal, reducing the need for unnecessary procedures such as biopsies and additional scans. The close agreement among accuracy (97.85%), sensitivity (97.85%), and precision (97.84%) further points to a well-balanced model rather than one optimized for a single metric.</p>
<p>All baseline models, including ResNet50, DenseNet121, and other comparative architectures, were trained using the same preprocessing pipeline, data augmentation strategy, dataset split, optimizer, learning rate, batch size, and number of training epochs as the proposed model. Each baseline was initialized with ImageNet-pretrained weights and fine-tuned following an identical transfer learning strategy to ensure fair comparison.</p>
<p>The comparatively lower performance of the standard ResNet50 model can be attributed to its relatively heavy architecture and higher parameter redundancy when applied to a limited-size medical imaging dataset. Without architectural modifications aimed at efficiency or enhanced feature recalibration, ResNet50 exhibits reduced generalization capability under constrained training conditions. In contrast, the proposed Ghost-ResNet with Efficient Channel Attention reduces feature redundancy and enhances discriminative channel-wise representations, resulting in improved learning efficiency and superior performance.</p>
<p>Although the proposed model achieves strong performance with reduced computational complexity, several improvements can be explored. These include incorporating multi-scale feature fusion to better capture tumor boundaries, experimenting with hybrid spatial&#x2013;channel attention mechanisms, and extending the model to 3D MRI volumes. Additionally, optimizing the model for real-time clinical deployment through hardware-aware pruning techniques may further enhance its applicability.</p>
</sec>
<sec id="S5.SS2">
<label>5.2</label>
<title>Framework for integration into hospital information systems</title>
<p>While the experimental evaluation presented in this study is limited to offline MRI classification, the proposed architecture is designed with future clinical integration considerations in mind. In a potential deployment scenario, the model could be incorporated into hospital information systems through standardized interfaces, where MRI images acquired in DICOM format may be retrieved from Picture Archiving and Communication Systems (PACS) for automated analysis. Preprocessing steps such as contrast normalization and intensity adjustment could be performed by an intermediate processing layer before inference.</p>
<p>The classification outputs, including predicted tumor class probabilities and visual explanations (e.g., Grad-CAM), could be structured in a format compatible with electronic health record (EHR) systems using established interoperability standards such as HL7. These outputs would be intended solely as decision-support information for radiologists, rather than as autonomous diagnostic results. Considerations related to data privacy, security, and traceability, such as compliance with HIPAA and GDPR guidelines, de-identification procedures, and system logging, are acknowledged as essential requirements for real-world deployment; however, they are not implemented or validated within the scope of the present study.</p>
<p>It should be emphasized that clinical deployment, regulatory approval, and prospective validation in real hospital environments remain outside the scope of this work and constitute important directions for future research. Any practical integration would require extensive testing, collaboration with clinical partners, and adherence to institutional and regulatory standards before being adopted in routine clinical workflows.</p>
</sec>
<sec id="S5.SS3">
<label>5.3</label>
<title>Comparison with existing brain tumor classification methods</title>
<p>In this section, we compare the proposed model with previous methods of brain tumor classification to evaluate the performance, efficiency, and relevance of each. Unlike previous approaches, in which the main concern has been the highest possible accuracy via deeper networks or ensembles, we focus on efficient designs while not compromising the reliability of the classifier.</p>
<p><xref ref-type="table" rid="T5">Table 5</xref> also provides a detailed comparison of the proposed model with six very recent state-of-the-art studies from 2023 to 2025 on brain tumor classification based on MRI images. These studies utilized a wide range of methodological approaches: multi-model comparisons by Disci et al. and Rastogi et al., single optimized architectures by Shahin and Aiya et al., and novel custom designs by Zahoor et al. and Lin et al., with accuracy ranging from 96.11% to 99.66%. While the proposed model achieves 97.85% accuracy (ranking fourth), it demonstrates unique strengths not present in competing studies: (1) the only integration of Ghost modules for 40% parameter reduction, (2) mixed precision training reducing memory consumption by 50%, (3) comprehensive seven-stage preprocessing pipeline (CLAHE, Gaussian blur, unsharp masking, morphological operations, edge enhancement, bilateral filtering, histogram equalization) versus standard 1&#x2013;3 techniques in other studies, and (4) highest reported specificity (98.93%), critical for minimizing false positives in clinical practice. Although studies by Shahin (99.66%), Aiya et al. (99.00%), and Disci et al. (98.73%) achieved higher accuracy, these gains come with trade-offs, including increased computational complexity (multi-model approaches requiring six separate architectures) and specialized optimizers that require extensive tuning (Ranger), as well as complex preprocessing.</p>
<table-wrap position="float" id="T5">
<label>TABLE 5</label>
<caption><p>Comparison with existing brain tumor classification methods.</p></caption>
<table cellspacing="5" cellpadding="5" frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Study</th>
<th valign="top" align="left">Dataset</th>
<th valign="top" align="left">Methodology</th>
<th valign="top" align="left">Results</th>
<th valign="top" align="left">Methodology differences vs. proposed model</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left"><bold>Proposed model</bold></td>
<td valign="top" align="left"><bold>Bangladesh brain cancer MRI (6,056 images, 3 classes: Glioma, Meningioma, Pituitary)</bold></td>
<td valign="top" align="left"><bold>ResNet50, Ghost Module, ECA Attention, CLAHE</bold></td>
<td valign="top" align="left"><bold>Accuracy: 97.85%, Precision: 97.84%, Recall: 97.85%, Specificity: 98.93%, F1-Score: 97.85%</bold></td>
<td valign="top" align="left"><bold>Baseline for comparison</bold></td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B9">Disci et al. (2025)</xref></td>
<td valign="top" align="left">Figshare Dataset (7,023 images, 4 classes: Glioma, Meningioma, Pituitary, No tumor)</td>
<td valign="top" align="left">Xception + MobileNetV2 + InceptionV3 + ResNet50 + VGG16 + DenseNet121 (Multi-model comparison) + Transfer Learning + Standard Preprocessing</td>
<td valign="top" align="left">Accuracy: 98.73% (Xception best), F1-Score: 95.29%, Challenges: Recall improvement for Glioma/Meningioma, Class imbalance</td>
<td valign="top" align="left">No Ghost Module, No ECA Attention, No Mixed Precision, No CLAHE, Multi-model comparison approach, Better accuracy (+0.88%) but more complex with 6 models</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B25">Shahin (2025)</xref></td>
<td valign="top" align="left">Figshare (7,023 images, 4 classes: Glioma, Meningioma, Pituitary, No tumor)</td>
<td valign="top" align="left">ResNet34 (Fine-tuned) + Custom Classification Head + Data Augmentation + Ranger Optimizer (RAdam + Lookahead) + Transfer Learning</td>
<td valign="top" align="left">Accuracy: 99.66%, Training Time: 37 min (T4 GPU), Outperforms methods with 95-98% accuracy</td>
<td valign="top" align="left">ResNet34 (not ResNet50), No Ghost Module, No ECA Attention, No Mixed Precision, No CLAHE, Ranger optimizer (vs. Adam), Better accuracy (+1.81%), but lighter architecture and a different optimizer</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B34">Zahoor et al. (2024)</xref></td>
<td valign="top" align="left">Kaggle + Br35H + Figshare (Multiple datasets)</td>
<td valign="top" align="left">Res-BRNet (Residual + Region-based CNN) + Strategic Regional and Boundary Learning + Modified Spatial/Residual Blocks + Healthcare Expert Verification</td>
<td valign="top" align="left">Accuracy: 97.45%, High discrimination accuracy, Regional tumor properties learning</td>
<td valign="top" align="left">No ResNet50 (Res-BRNet custom architecture), No Ghost Module, No ECA Attention, No Mixed Precision, No CLAHE, Regional attention mechanism, Lower accuracy (-0.40%) but novel regional approach</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B17">Lin et al. (2025)</xref></td>
<td valign="top" align="left">Brain Tumor Dataset (Multiple classes)</td>
<td valign="top" align="left">ResNet50 + ECA + Fractional Denoising + Multi-scale Conv</td>
<td valign="top" align="left">96.72%</td>
<td valign="top" align="left">No Ghost, No Mixed Precision, Fractional instead of CLAHE, Long training</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B24">Rastogi et al. (2025)</xref></td>
<td valign="top" align="left">Kaggle Brain Tumor Dataset (4 classes)</td>
<td valign="top" align="left">InceptionResNetV2 + VGG19 + Xception + MobileNetV2 (Multi-architecture comparison) + Fine-tuned Transfer Learning + Standard Augmentation</td>
<td valign="top" align="left">Accuracy: 96.11% (Xception best)</td>
<td valign="top" align="left">No ResNet50 (InceptionResNetV2/Xception), No Ghost Module, No ECA Attention, No Mixed Precision, No CLAHE, Standard preprocessing, Lower accuracy (-1.74%)</td>
</tr>
<tr>
<td valign="top" align="left"><xref ref-type="bibr" rid="B2">Aiya et al. (2025)</xref></td>
<td valign="top" align="left">Figshare (7,023 images, 4 classes)</td>
<td valign="top" align="left">VGG16 + Attention Mechanisms + Grad-CAM + CLAHE + Advanced Preprocessing + Hyperparameter Optimization</td>
<td valign="top" align="left">Accuracy: 99.00%, Test Accuracy: 97.32% (real-time), Precision and Recall reported</td>
<td valign="top" align="left">No ResNet50 (VGG16 instead), No Ghost Module, No ECA (different attention), No Mixed Precision, CLAHE used (shared), Higher accuracy (+1.15%), but requires explainability overhead and complex preprocessing</td>
</tr>
</tbody>
</table></table-wrap>
<p>Thus, based on the outcome of the comparison tests, it can be concluded that our proposed Enhanced MEGS-Net defies the conventional notion that better performance in brain tumor classification from MRI images necessarily requires deeper and more computationally complex models. Rather, it demonstrates that optimal classification performance can be efficiently achieved through well-strategized feature extraction and lightweight attention mechanisms.</p>
<p>Nevertheless, certain limitations of this study ought to be considered. Firstly, the assessment is conducted with only one dataset, which might limit generalizability across different populations and imaging protocols. Secondly, the current architecture is designed to operate on single-sequence MRI images, whereas the use of multi-parametric images might influence performance. Lastly, this study addresses tumor classification only, without considering tumor grading or histopathologic analysis.</p>
</sec>
</sec>
<sec id="S6">
<label>6</label>
<title>Conclusion and future directions</title>
<p>This paper describes an advanced Ghost-ECA mixed-precision CNN architecture for the classification of brain tumors based on MRI images that tries to address the trade-off required in achieving accurate diagnoses and efficiency simultaneously. While combining Ghost modules for effective feature creation and Efficient Channel Attention for adaptive channel attention within the framework of the pre-trained backbone structure ResNet50, the proposed architecture was able to eliminate redundancy and preserve robust discriminative power at the same time. On the Bangladesh Brain Cancer MRI Dataset, the proposed architecture reported accuracy results of 97.85% on the respective task, surpassing the existing state-of-the-art accuracy reported on the same task using VGG16, VGG19, ResNet50, Xception, and DenseNet121 models in the field of image classification based on the transfer learning paradigm. Future research may focus on validating the proposed architecture across larger and multi-institutional MRI datasets to assess generalization capability. Exploring self-supervised or semi-supervised learning strategies could further reduce dependency on labeled data. Moreover, integrating explainability techniques such as Grad-CAM may enhance clinical trust by providing visual interpretation of model predictions. These directions offer promising pathways to further advance efficient and reliable brain tumor classification systems.</p>
</sec>
</body>
<back>
<sec id="S7" sec-type="data-availability">
<title>Data availability statement</title>
<p>Publicly available datasets were analyzed in this study. This data can be found at: <ext-link ext-link-type="uri" xlink:href="https://www.kaggle.com/datasets/orvile/brain-cancer-mri-dataset">https://www.kaggle.com/datasets/orvile/brain-cancer-mri-dataset</ext-link>.</p>
</sec>
<sec id="S8" sec-type="author-contributions">
<title>Author contributions</title>
<p>NS: Software, Conceptualization, Methodology, Writing &#x2013; original draft, Project administration. KN: Methodology, Formal analysis, Resources, Writing &#x2013; original draft, Visualization. RA: Validation, Writing &#x2013; original draft, Project administration, Data curation, Visualization. ASA: Conceptualization, Supervision, Writing &#x2013; review &#x0026; editing, Funding acquisition. AMA: Investigation, Funding acquisition, Project administration, Writing &#x2013; review &#x0026; editing, Conceptualization. SA: Writing &#x2013; original draft, Validation, Methodology, Software. AA: Software, Writing &#x2013; review &#x0026; editing, Visualization, Data curation, Validation. MA: Formal analysis, Visualization, Resources, Supervision, Writing &#x2013; review &#x0026; editing, Investigation.</p>
</sec>
<sec id="S10" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="S11" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec id="S12" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ahsan</surname> <given-names>R.</given-names></name> <name><surname>Shahzadi</surname> <given-names>I.</given-names></name> <name><surname>Najeeb</surname> <given-names>F.</given-names></name> <name><surname>Omer</surname> <given-names>H.</given-names></name></person-group> (<year>2025</year>). <article-title>Brain tumor detection and segmentation using deep learning.</article-title> <source><italic>MAGMA</italic></source> <volume>38</volume> <fpage>13</fpage>&#x2013;<lpage>22</lpage>. <pub-id pub-id-type="doi">10.1007/s10334-024-01203-5</pub-id> <pub-id pub-id-type="pmid">39231857</pub-id></mixed-citation></ref>
<ref id="B2"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Aiya</surname> <given-names>A. J.</given-names></name> <name><surname>Wani</surname> <given-names>N.</given-names></name> <name><surname>Ramani</surname> <given-names>M.</given-names></name> <name><surname>Kumar</surname> <given-names>A.</given-names></name> <name><surname>Pant</surname> <given-names>S.</given-names></name> <name><surname>Kotecha</surname> <given-names>K.</given-names></name><etal/></person-group> (<year>2025</year>). <article-title>Optimized deep learning for brain tumor detection: a hybrid approach with attention mechanisms and clinical explainability.</article-title> <source><italic>Sci. Rep</italic>.</source> <volume>15</volume>:<fpage>31386</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-025-04591-3</pub-id> <pub-id pub-id-type="pmid">40858650</pub-id></mixed-citation></ref>
<ref id="B3"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Albalawi</surname> <given-names>E.</given-names></name> <name><surname>Thakur</surname> <given-names>A.</given-names></name> <name><surname>Dorai</surname> <given-names>D. R.</given-names></name> <name><surname>Bhatia Khan</surname> <given-names>S.</given-names></name> <name><surname>Mahesh</surname> <given-names>T. R.</given-names></name> <name><surname>Almusharraf</surname> <given-names>A.</given-names></name><etal/></person-group> (<year>2024</year>). <article-title>Enhancing brain tumor classification in MRI scans with a multi-layer customized convolutional neural network approach.</article-title> <source><italic>Front. Comput. Neurosci</italic>.</source> <volume>18</volume>:<fpage>1418546</fpage>. <pub-id pub-id-type="doi">10.3389/fncom.2024.1418546</pub-id> <pub-id pub-id-type="pmid">38933391</pub-id></mixed-citation></ref>
<ref id="B4"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Babu Vimala</surname> <given-names>B.</given-names></name> <name><surname>Srinivasan</surname> <given-names>S.</given-names></name> <name><surname>Mathivanan</surname> <given-names>S. K.</given-names></name> <name><surname>Mahalakshmi, Jayagopal</surname> <given-names>P.</given-names></name> <name><surname>Dalu</surname> <given-names>G. T.</given-names></name></person-group> (<year>2023</year>). <article-title>Detection and classification of brain tumor using hybrid deep learning models.</article-title> <source><italic>Sci. Rep</italic>.</source> <volume>13</volume>:<fpage>23029</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-023-50505-6</pub-id> <pub-id pub-id-type="pmid">38155247</pub-id></mixed-citation></ref>
<ref id="B5"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bakr</surname> <given-names>E. M.</given-names></name> <name><surname>El-Sallab</surname> <given-names>A.</given-names></name> <name><surname>Rashwan</surname> <given-names>M.</given-names></name></person-group> (<year>2022</year>). <article-title>EMCA: efficient multiscale channel attention module.</article-title> <source><italic>IEEE Access</italic></source> <volume>10</volume> <fpage>103447</fpage>&#x2013;<lpage>103461</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2022.3205602</pub-id></mixed-citation></ref>
<ref id="B6"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bouhafra</surname> <given-names>S.</given-names></name> <name><surname>El Bahi</surname> <given-names>H.</given-names></name></person-group> (<year>2025</year>). <article-title>Deep learning approaches for brain tumor detection and classification using MRI images (2020 to 2024): a systematic review.</article-title> <source><italic>J. Imaging Inform. Med</italic>.</source> <volume>38</volume> <fpage>1403</fpage>&#x2013;<lpage>1433</lpage>. <pub-id pub-id-type="doi">10.1007/s10278-024-01283-8</pub-id> <pub-id pub-id-type="pmid">39349785</pub-id></mixed-citation></ref>
<ref id="B7"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>C.</given-names></name> <name><surname>Guo</surname> <given-names>Z.</given-names></name> <name><surname>Zeng</surname> <given-names>H.</given-names></name> <name><surname>Xiong</surname> <given-names>P.</given-names></name> <name><surname>Dong</surname> <given-names>J.</given-names></name></person-group> (<year>2022</year>). <article-title>Repghost: a hardware-efficient ghost module via re-parameterization.</article-title> <source><italic>arXiv [Preprint].</italic></source> <pub-id pub-id-type="doi">10.48550/arXiv.2211.06088</pub-id></mixed-citation></ref>
<ref id="B8"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Choudhary</surname> <given-names>S.</given-names></name> <name><surname>Meena</surname> <given-names>G.</given-names></name></person-group> (<year>2021</year>). &#x201C;<article-title>Abnormality detection in musculoskeletal radiographs</article-title>,&#x201D; in <source><italic>Proceedings of the IOP Conference Series: Materials Science and Engineering</italic></source>, <volume>Vol. 2021</volume> (<publisher-loc>Bristol</publisher-loc>: <publisher-name>IOP Publishing</publisher-name>), <fpage>12009</fpage>.</mixed-citation></ref>
<ref id="B9"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Disci</surname> <given-names>R.</given-names></name> <name><surname>Gurcan</surname> <given-names>F.</given-names></name> <name><surname>Soylu</surname> <given-names>A.</given-names></name></person-group> (<year>2025</year>). <article-title>Advanced brain tumor classification in MR images using transfer learning and pre-trained deep CNN Models.</article-title> <source><italic>Cancers</italic></source> <volume>17</volume>:<fpage>121</fpage>. <pub-id pub-id-type="doi">10.3390/cancers17010121</pub-id> <pub-id pub-id-type="pmid">39796749</pub-id></mixed-citation></ref>
<ref id="B10"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ferdous</surname> <given-names>M.</given-names></name> <name><surname>Mahmud</surname> <given-names>S.</given-names></name> <name><surname>Shimul</surname> <given-names>M. E. Z.</given-names></name></person-group> (<year>2025</year>). <article-title>MedNet: a lightweight attention-augmented CNN for medical image classification.</article-title> <source><italic>Sci. Rep</italic>.</source> <volume>15</volume>:<fpage>41936</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-025-25857-w</pub-id> <pub-id pub-id-type="pmid">41290816</pub-id></mixed-citation></ref>
<ref id="B11"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Han</surname> <given-names>K.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Tian</surname> <given-names>Q.</given-names></name> <name><surname>Guo</surname> <given-names>J.</given-names></name> <name><surname>Xu</surname> <given-names>C.</given-names></name> <name><surname>Xu</surname> <given-names>C.</given-names></name></person-group> (<year>2020</year>). &#x201C;<article-title>Ghostnet: more features from cheap operations</article-title>,&#x201D; in <source><italic>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</italic></source>, (<publisher-loc>Seattle, WA</publisher-loc>), <fpage>1580</fpage>&#x2013;<lpage>1589</lpage>.</mixed-citation></ref>
<ref id="B12"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hossin</surname> <given-names>M.</given-names></name> <name><surname>Sulaiman</surname> <given-names>M. N.</given-names></name></person-group> (<year>2015</year>). <article-title>A review on evaluation metrics for data classification evaluations.</article-title> <source><italic>Int. J. Data Min. Knowl. Manage. Process</italic></source> <volume>5</volume>:<fpage>1</fpage>. <pub-id pub-id-type="doi">10.5121/ijdkp.2015.5201</pub-id></mixed-citation></ref>
<ref id="B13"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Iftikhar</surname> <given-names>S.</given-names></name> <name><surname>Anjum</surname> <given-names>N.</given-names></name> <name><surname>Siddiqui</surname> <given-names>A. B.</given-names></name> <name><surname>Ur Rehman</surname> <given-names>M.</given-names></name> <name><surname>Ramzan</surname> <given-names>N.</given-names></name></person-group> (<year>2025</year>). <article-title>Explainable CNN for brain tumor detection and classification through XAI based key features identification.</article-title> <source><italic>Brain Inform</italic>.</source> <volume>12</volume>:<fpage>10</fpage>. <pub-id pub-id-type="doi">10.1186/s40708-025-00257-y</pub-id> <pub-id pub-id-type="pmid">40304860</pub-id></mixed-citation></ref>
<ref id="B14"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kanal</surname> <given-names>E.</given-names></name> <name><surname>Maki</surname> <given-names>J. H.</given-names></name> <name><surname>Schramm</surname> <given-names>P.</given-names></name> <name><surname>Marti-Bonmati</surname> <given-names>L.</given-names></name></person-group> (<year>2025</year>). <article-title>Evolving characteristics of gadolinium-based contrast agents for MR Imaging: a systematic review of the importance of relaxivity.</article-title> <source><italic>J. Magn. Reson. Imaging</italic></source> <volume>61</volume> <fpage>52</fpage>&#x2013;<lpage>69</lpage>. <pub-id pub-id-type="doi">10.1002/jmri.29367</pub-id> <pub-id pub-id-type="pmid">38699938</pub-id></mixed-citation></ref>
<ref id="B15"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Khandaker</surname> <given-names>M. A. A.</given-names></name> <name><surname>Raha</surname> <given-names>Z. S.</given-names></name> <name><surname>Bin Iqbal</surname> <given-names>S.</given-names></name> <name><surname>Mridha</surname> <given-names>M. F.</given-names></name> <name><surname>Shin</surname> <given-names>J.</given-names></name></person-group> (<year>2024</year>). &#x201C;<article-title>From images to insights: transforming brain cancer diagnosis with explainable AI</article-title>,&#x201D; in <source><italic>Proceedings of the 2024 27th International Conference on Computer and Information Technology (ICCIT)</italic></source>, (<publisher-loc>Sydney</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>891</fpage>&#x2013;<lpage>896</lpage>.</mixed-citation></ref>
<ref id="B16"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Ammari</surname> <given-names>S.</given-names></name> <name><surname>Balleyguier</surname> <given-names>C.</given-names></name> <name><surname>Lassau</surname> <given-names>N.</given-names></name> <name><surname>Chouzenoux</surname> <given-names>E.</given-names></name></person-group> (<year>2021</year>). <article-title>Impact of preprocessing and harmonization methods on the removal of scanner effects in brain MRI radiomic features.</article-title> <source><italic>Cancers</italic></source> <volume>13</volume>:<fpage>3000</fpage>. <pub-id pub-id-type="doi">10.3390/cancers13123000</pub-id> <pub-id pub-id-type="pmid">34203896</pub-id></mixed-citation></ref>
<ref id="B17"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lin</surname> <given-names>J.</given-names></name> <name><surname>Huang</surname> <given-names>L.</given-names></name> <name><surname>Ding</surname> <given-names>L.</given-names></name> <name><surname>Yan</surname> <given-names>S.</given-names></name></person-group> (<year>2025</year>). <article-title>Deep brain tumor lesion classification network: a hybrid method optimizing ResNet50 and EfficientNetB0 for enhanced feature extraction.</article-title> <source><italic>Fractal Fract.</italic></source> <volume>9</volume>:<fpage>614</fpage>. <pub-id pub-id-type="doi">10.3390/fractalfract9090614</pub-id></mixed-citation></ref>
<ref id="B18"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Litjens</surname> <given-names>G.</given-names></name> <name><surname>Kooi</surname> <given-names>T.</given-names></name> <name><surname>Bejnordi</surname> <given-names>B. E.</given-names></name> <name><surname>Setio</surname> <given-names>A. A. A.</given-names></name> <name><surname>Ciompi</surname> <given-names>F.</given-names></name> <name><surname>Ghafoorian</surname> <given-names>M.</given-names></name><etal/></person-group> (<year>2017</year>). <article-title>A survey on deep learning in medical image analysis.</article-title> <source><italic>Med. Image Anal</italic>.</source> <volume>42</volume> <fpage>60</fpage>&#x2013;<lpage>88</lpage>. <pub-id pub-id-type="doi">10.1016/j.media.2017.07.005</pub-id> <pub-id pub-id-type="pmid">28778026</pub-id></mixed-citation></ref>
<ref id="B19"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>K.</given-names></name> <name><surname>Qin</surname> <given-names>S.</given-names></name> <name><surname>Ning</surname> <given-names>J.</given-names></name> <name><surname>Xin</surname> <given-names>P.</given-names></name> <name><surname>Wang</surname> <given-names>Q.</given-names></name> <name><surname>Chen</surname> <given-names>Y.</given-names></name><etal/></person-group> (<year>2023</year>). <article-title>Prediction of primary tumor sites in spinal metastases using a ResNet-50 convolutional neural network based on MRI.</article-title> <source><italic>Cancers</italic></source> <volume>15</volume>:<fpage>2974</fpage>. <pub-id pub-id-type="doi">10.3390/cancers15112974</pub-id> <pub-id pub-id-type="pmid">37296938</pub-id></mixed-citation></ref>
<ref id="B20"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Meena</surname> <given-names>G.</given-names></name> <name><surname>Mohbey</surname> <given-names>K. K.</given-names></name> <name><surname>Acharya</surname> <given-names>M.</given-names></name> <name><surname>Lokesh</surname> <given-names>K.</given-names></name></person-group> (<year>2023</year>). <article-title>An improved convolutional neural network-based model for detecting brain tumors from augmented MRI images.</article-title> <source><italic>J. Auton. Intell.</italic></source> <volume>6</volume>. <pub-id pub-id-type="doi">10.32629/jai.v6i1.561</pub-id></mixed-citation></ref>
<ref id="B21"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mishra</surname> <given-names>A.</given-names></name></person-group> (<year>2021</year>). <article-title>Contrast limited adaptive histogram equalization (CLAHE) approach for enhancement of the microstructures of friction stir welded joints.</article-title> <source><italic>arXiv [Preprint].</italic></source> <pub-id pub-id-type="doi">10.48550/arXiv.2109.00886</pub-id></mixed-citation></ref>
<ref id="B22"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Price</surname> <given-names>M.</given-names></name> <name><surname>Ballard</surname> <given-names>C. A. P.</given-names></name> <name><surname>Benedetti</surname> <given-names>J. R.</given-names></name> <name><surname>Kruchko</surname> <given-names>C.</given-names></name> <name><surname>Barnholtz-Sloan</surname> <given-names>J. S.</given-names></name> <name><surname>Ostrom</surname> <given-names>Q. T.</given-names></name></person-group> (<year>2025</year>). <article-title>CBTRUS Statistical Report: primary brain and other central nervous system tumors diagnosed in the United States in 2018-2022.</article-title> <source><italic>Neuro Oncol</italic>.</source> <volume>27</volume>(<issue>Suppl. 4</issue>), <fpage>iv1</fpage>&#x2013;<lpage>iv66</lpage>. <pub-id pub-id-type="doi">10.1093/neuonc/noaf194</pub-id> <pub-id pub-id-type="pmid">41092086</pub-id></mixed-citation></ref>
<ref id="B23"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rahman</surname> <given-names>M. M.</given-names></name></person-group> (<year>2024</year>). <article-title>Brain cancer-MRI dataset.</article-title> <source><italic>Mendeley Data</italic></source> <volume>1</volume>. <pub-id pub-id-type="doi">10.17632/mk56jw9rns.1</pub-id></mixed-citation></ref>
<ref id="B24"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rastogi</surname> <given-names>D.</given-names></name> <name><surname>Johri</surname> <given-names>P.</given-names></name> <name><surname>Donelli</surname> <given-names>M.</given-names></name> <name><surname>Kumar</surname> <given-names>L.</given-names></name> <name><surname>Bindewari</surname> <given-names>S.</given-names></name> <name><surname>Raghav</surname> <given-names>A.</given-names></name><etal/></person-group> (<year>2025</year>). <article-title>Brain tumor detection and prediction in MRI images utilizing a fine-tuned transfer learning model integrated within deep learning frameworks.</article-title> <source><italic>Life</italic></source> <volume>15</volume>:<fpage>327</fpage>. <pub-id pub-id-type="doi">10.3390/life15030327</pub-id> <pub-id pub-id-type="pmid">40141673</pub-id></mixed-citation></ref>
<ref id="B25"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shahin</surname> <given-names>A.</given-names></name></person-group> (<year>2025</year>). <article-title>Fine-tuned ResNet34 for efficient brain tumor classification.</article-title> <source><italic>Sci. Rep</italic>.</source> <volume>15</volume>:<fpage>36910</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-025-20872-3</pub-id> <pub-id pub-id-type="pmid">41125736</pub-id></mixed-citation></ref>
<ref id="B26"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shen</surname> <given-names>D.</given-names></name> <name><surname>Wu</surname> <given-names>G.</given-names></name> <name><surname>Suk</surname> <given-names>H. I.</given-names></name></person-group> (<year>2017</year>). <article-title>Deep learning in medical image analysis.</article-title> <source><italic>Annu. Rev. Biomed. Eng</italic>.</source> <volume>19</volume> <fpage>221</fpage>&#x2013;<lpage>248</lpage>. <pub-id pub-id-type="doi">10.1146/annurev-bioeng-071516-044442</pub-id> <pub-id pub-id-type="pmid">28301734</pub-id></mixed-citation></ref>
<ref id="B27"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Vamsidhar</surname> <given-names>D.</given-names></name> <name><surname>Desai</surname> <given-names>P.</given-names></name> <name><surname>Joshi</surname> <given-names>S.</given-names></name> <name><surname>Kolhar</surname> <given-names>S.</given-names></name> <name><surname>Deshpande</surname> <given-names>N.</given-names></name> <name><surname>Gite</surname> <given-names>S.</given-names></name></person-group> (<year>2025</year>). <article-title>Hybrid model integration with explainable AI for brain tumor diagnosis: a unified approach to MRI analysis and prediction.</article-title> <source><italic>Sci. Rep</italic>.</source> <volume>15</volume>:<fpage>20542</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-025-06455-2</pub-id> <pub-id pub-id-type="pmid">40596288</pub-id></mixed-citation></ref>
<ref id="B28"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>Q.</given-names></name> <name><surname>Wu</surname> <given-names>B.</given-names></name> <name><surname>Zhu</surname> <given-names>P.</given-names></name> <name><surname>Li</surname> <given-names>P.</given-names></name> <name><surname>Zuo</surname> <given-names>W.</given-names></name> <name><surname>Hu</surname> <given-names>Q.</given-names></name></person-group> (<year>2020</year>). &#x201C;<article-title>ECA-Net: efficient channel attention for deep convolutional neural networks</article-title>,&#x201D; in <source><italic>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</italic></source>, (<publisher-loc>Seattle, WA</publisher-loc>), <fpage>11534</fpage>&#x2013;<lpage>11542</lpage>.</mixed-citation></ref>
<ref id="B29"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wen</surname> <given-names>L.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Gao</surname> <given-names>L.</given-names></name></person-group> (<year>2020</year>). <article-title>A transfer convolutional neural network for fault diagnosis based on ResNet-50.</article-title> <source><italic>Neural Comput. Appl.</italic></source> <volume>32</volume> <fpage>6111</fpage>&#x2013;<lpage>6124</lpage>. <pub-id pub-id-type="doi">10.1007/s00521-019-04097-w</pub-id></mixed-citation></ref>
<ref id="B30"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wong</surname> <given-names>Y.</given-names></name> <name><surname>Su</surname> <given-names>E. L. M.</given-names></name> <name><surname>Yeong</surname> <given-names>C. F.</given-names></name> <name><surname>Holderbaum</surname> <given-names>W.</given-names></name> <name><surname>Yang</surname> <given-names>C.</given-names></name></person-group> (<year>2025</year>). <article-title>Brain tumor classification using MRI images and deep learning techniques.</article-title> <source><italic>PLoS One</italic></source> <volume>20</volume>:<fpage>e0322624</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0322624</pub-id> <pub-id pub-id-type="pmid">40344143</pub-id></mixed-citation></ref>
<ref id="B31"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yadav</surname> <given-names>R. K.</given-names></name> <name><surname>Mishra</surname> <given-names>A. K.</given-names></name> <name><surname>Saini</surname> <given-names>J. B.</given-names></name> <name><surname>Pant</surname> <given-names>H.</given-names></name> <name><surname>Biradar</surname> <given-names>R. G.</given-names></name> <name><surname>Waghodekar</surname> <given-names>P.</given-names></name></person-group> (<year>2024</year>). <article-title>A model for brain tumor detection using a modified convolution layer resnet-50.</article-title> <source><italic>Indian J. Inform. Sources Serv.</italic></source> <volume>14</volume> <fpage>29</fpage>&#x2013;<lpage>38</lpage>. <pub-id pub-id-type="doi">10.51983/ijiss-2024.14.1.3753</pub-id></mixed-citation></ref>
<ref id="B32"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yagis</surname> <given-names>E.</given-names></name> <name><surname>Atnafu</surname> <given-names>S. W.</given-names></name> <name><surname>Garc&#x00ED;a Seco de Herrera</surname> <given-names>A.</given-names></name> <name><surname>Marzi</surname> <given-names>C.</given-names></name> <name><surname>Scheda</surname> <given-names>R.</given-names></name> <name><surname>Giannelli</surname> <given-names>M.</given-names></name><etal/></person-group> (<year>2021</year>). <article-title>Effect of data leakage in brain MRI classification using 2D convolutional neural networks.</article-title> <source><italic>Sci. Rep</italic>.</source> <volume>11</volume>:<fpage>22544</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-021-01681-w</pub-id> <pub-id pub-id-type="pmid">34799630</pub-id></mixed-citation></ref>
<ref id="B33"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zacharaki</surname> <given-names>E. I.</given-names></name> <name><surname>Wang</surname> <given-names>S.</given-names></name> <name><surname>Chawla</surname> <given-names>S.</given-names></name> <name><surname>Soo Yoo</surname> <given-names>D.</given-names></name> <name><surname>Wolf</surname> <given-names>R.</given-names></name> <name><surname>Melhem</surname> <given-names>E. R.</given-names></name><etal/></person-group> (<year>2009</year>). <article-title>Classification of brain tumor type and grade using MRI texture and shape in a machine learning scheme.</article-title> <source><italic>Magn. Reson. Med</italic>.</source> <volume>62</volume> <fpage>1609</fpage>&#x2013;<lpage>1618</lpage>. <pub-id pub-id-type="doi">10.1002/mrm.22147</pub-id> <pub-id pub-id-type="pmid">19859947</pub-id></mixed-citation></ref>
<ref id="B34"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zahoor</surname> <given-names>M. M.</given-names></name> <name><surname>Khan</surname> <given-names>S. H.</given-names></name> <name><surname>Alahmadi</surname> <given-names>T. J.</given-names></name> <name><surname>Alsahfi</surname> <given-names>T.</given-names></name> <name><surname>Mazroa</surname> <given-names>A. S. A.</given-names></name> <name><surname>Sakr</surname> <given-names>H. A.</given-names></name><etal/></person-group> (<year>2024</year>). <article-title>Brain tumor MRI classification using a novel deep residual and regional CNN.</article-title> <source><italic>Biomedicines</italic></source> <volume>12</volume>:<fpage>1395</fpage>. <pub-id pub-id-type="doi">10.3390/biomedicines12071395</pub-id> <pub-id pub-id-type="pmid">39061969</pub-id></mixed-citation></ref>
<ref id="B35"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Zuiderveld</surname> <given-names>K.</given-names></name></person-group> (<year>1994</year>). &#x201C;<article-title>Contrast limited adaptive histogram equalization</article-title>,&#x201D; in <source><italic>Graphics Gems IV</italic></source>, <role>ed.</role> <person-group person-group-type="editor"><name><surname>Heckbert</surname> <given-names>P. S.</given-names></name></person-group> (<publisher-loc>Cambridge, MA</publisher-loc>: <publisher-name>Academic Press</publisher-name>), <fpage>474</fpage>&#x2013;<lpage>485</lpage>.</mixed-citation></ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by"><p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/260253/overview">Hyunjin Park</ext-link>, Sungkyunkwan University, Republic of Korea</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by"><p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2354767/overview">Gaurav Meena</ext-link>, Central University of Rajasthan, India</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3358413/overview">Shake Ibna Abir</ext-link>, Florida Gulf Coast University, United States</p></fn>
</fn-group>
</back>
</article>