<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Cell Dev. Biol.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Cell and Developmental Biology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Cell Dev. Biol.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2296-634X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1767574</article-id>
<article-id pub-id-type="doi">10.3389/fcell.2026.1767574</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>CG-RecNet: a gated and attention-fused deep learning framework for label-free classification of neural stem cell differentiation via imaging flow cytometry</article-title>
<alt-title alt-title-type="left-running-head">Li et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2026.1767574">10.3389/fcell.2026.1767574</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Li</surname>
<given-names>Qinzi</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3313026"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Liu</surname>
<given-names>Fang</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3312996"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhou</surname>
<given-names>Junyu</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3356132"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zou</surname>
<given-names>Xuanjian</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Gao</surname>
<given-names>Chenlin</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Li</surname>
<given-names>Jingze</given-names>
</name>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2761156"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
</contrib-group>
<aff id="aff1">
<label>1</label>
<institution>College of Life Sciences, Sichuan Agricultural University</institution>, <city>Ya&#x2019;an</city>, <country country="CN">China</country>
</aff>
<aff id="aff2">
<label>2</label>
<institution>College of Water Resources and Hydropower, Sichuan Agricultural University</institution>, <city>Ya&#x2019;an</city>, <country country="CN">China</country>
</aff>
<aff id="aff3">
<label>3</label>
<institution>College of Science, Sichuan Agricultural University</institution>, <city>Ya&#x2019;an</city>, <country country="CN">China</country>
</aff>
<aff id="aff4">
<label>4</label>
<institution>College of Information Engineering, Sichuan Agricultural University</institution>, <city>Ya&#x2019;an</city>, <country country="CN">China</country>
</aff>
<author-notes>
<corresp id="c001">
<label>&#x2a;</label>Correspondence: Jingze Li, <email xlink:href="mailto:l14677768612021@163.com">l14677768612021@163.com</email>
</corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-16">
<day>16</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>14</volume>
<elocation-id>1767574</elocation-id>
<history>
<date date-type="received">
<day>14</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>27</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>27</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Li, Liu, Zhou, Zou, Gao and Li.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Li, Liu, Zhou, Zou, Gao and Li</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-16">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Precise and longitudinal monitoring of Neural Stem Cell (NSC) differentiation is pivotal for advancing regenerative medicine. However, traditional identification methods rely on invasive immunochemical staining, which terminates cell viability and precludes real-time analysis.</p>
</sec>
<sec>
<title>Methods</title>
<p>To address these limitations, we propose CG-RecNet, a specialized deep learning framework for accurate, label-free classification of NSC differentiation lineages&#x2014;specifically neurons, astrocytes, and oligodendrocytes&#x2014;directly from brightfield imaging flow cytometry (IFC) data. The architecture integrates a LinAngular Cross-Channel Attention (LinAngular-XCA) Fusion Module to capture global morphological dependencies and a Gated Convolutional Neural Network (GatedCNN) Block to suppress background noise.</p>
</sec>
<sec>
<title>Results</title>
<p>Validation on rat embryonic NSCs indicates that CG-RecNet achieves an overall accuracy of 96.40% and a macro-average AUC of 0.9979, representing a 1.82% improvement over established baselines. Notably, the model achieves high precision in identifying the minority oligodendrocyte lineage without synthetic oversampling.</p>
</sec>
<sec>
<title>Discussion</title>
<p>Grad-CAM analysis indicates that the model&#x2019;s attention aligns with biologically relevant hallmarks, such as neurite outgrowth and soma texture. CG-RecNet provides a reliable, non-invasive, and qualitatively interpretable tool for neural stem cell research.</p>
</sec>
</abstract>
<kwd-group>
<kwd>attention mechanism</kwd>
<kwd>deep learning</kwd>
<kwd>explainable AI</kwd>
<kwd>high-throughput screening</kwd>
<kwd>label-free classification</kwd>
<kwd>neural stem cells</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement>
</funding-group>
<counts>
<fig-count count="8"/>
<table-count count="5"/>
<equation-count count="5"/>
<ref-count count="46"/>
<page-count count="16"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Stem Cell Research</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Neurological disorders, encompassing acute traumatic injuries and chronic neurodegenerative conditions such as Alzheimer&#x2019;s disease (AD), Parkinson&#x2019;s disease (PD) (<xref ref-type="bibr" rid="B21">Kalia and Lang, 2015</xref>), and Multiple Sclerosis (MS), constitute a profound global health burden (<xref ref-type="bibr" rid="B12">Feigin et al., 2019</xref>). The pathophysiology of these conditions is complex, typically characterized by neuronal loss, pathogenic protein accumulation, and widespread demyelination (<xref ref-type="bibr" rid="B20">Jucker and Walker, 2018</xref>; <xref ref-type="bibr" rid="B16">Hauser and Oksenberg, 2006</xref>; <xref ref-type="bibr" rid="B28">Long and Holtzman, 2019</xref>). Crucially, the Central Nervous System (CNS) response to such pathology is heavily mediated by glial cells. While astrocytes and oligodendrocytes play indispensable roles in maintaining homeostasis and facilitating signal conduction, their dysregulation&#x2014;manifesting as reactive astrogliosis or inflammatory crosstalk&#x2014;can significantly impede neural regeneration (<xref ref-type="bibr" rid="B24">Linnerbauer et al., 2020</xref>; <xref ref-type="bibr" rid="B36">Sofroniew, 2009</xref>; <xref ref-type="bibr" rid="B35">Siracusa et al., 2019</xref>). For instance, the glial scar, while containing injury, often acts as a physical barrier to axonal regrowth (<xref ref-type="bibr" rid="B2">Bradbury and Burnside, 2019</xref>; <xref ref-type="bibr" rid="B11">Fawcett and Asher, 1999</xref>; <xref ref-type="bibr" rid="B4">Cieri and Ramos, 2025</xref>), whereas the failure of oligodendrocyte precursor cells to remyelinate axons marks the functional decline in MS (<xref ref-type="bibr" rid="B13">Franklin et al., 2024</xref>; <xref ref-type="bibr" rid="B39">Warnock et al., 2020</xref>).</p>
<p>Given this context, Neural Stem Cells (NSCs) have emerged as a pivotal therapeutic strategy due to their intrinsic capacity for self-renewal and multipotent differentiation into neurons and glia (<xref ref-type="bibr" rid="B27">Li et al., 2024</xref>; <xref ref-type="bibr" rid="B7">Dimou and G&#xf6;tz, 2014</xref>). The transplantation of exogenous NSCs or the mobilization of endogenous progenitors holds significant promise for replacing lost neurons and modulating the immune microenvironment (<xref ref-type="bibr" rid="B6">De Gioia et al., 2020</xref>; <xref ref-type="bibr" rid="B30">Martino et al., 2011</xref>). Consequently, identifying and directing the fate of NSCs&#x2014;specifically distinguishing between functional neurons and supporting glial phenotypes&#x2014;is critical for the efficacy of cell-based therapies (<xref ref-type="bibr" rid="B15">Gao et al., 2023</xref>). However, tracking NSC fate <italic>in vivo</italic> and <italic>in vitro</italic> remains a significant challenge (<xref ref-type="bibr" rid="B41">Xue et al., 2022</xref>). Traditional identification relies heavily on molecular assays such as immunofluorescent staining. Although these techniques provide specific molecular markers, they are inherently invasive and destructive, precluding the real-time, longitudinal monitoring of live cell cultures required for high-throughput drug screening.</p>
<p>To overcome these methodological bottlenecks, artificial intelligence has been increasingly integrated into biomedical research. Deep learning (DL), particularly Convolutional Neural Networks (CNNs), has demonstrated substantial potential in medical image analysis (<xref ref-type="bibr" rid="B19">Jia et al., 2024</xref>), automating complex diagnostic tasks with high precision (<xref ref-type="bibr" rid="B25">Litjens et al., 2017</xref>; <xref ref-type="bibr" rid="B10">Esteva et al., 2017</xref>; <xref ref-type="bibr" rid="B43">Zhou et al., 2021</xref>). In the realm of cellular imaging, DL algorithms have successfully segmented cells, analyzed phenotypes, and performed &#x201c;<italic>in silico</italic> labeling&#x201d;&#x2014;predicting fluorescent labels from label-free brightfield images (<xref ref-type="bibr" rid="B23">Krikid et al., 2024</xref>; <xref ref-type="bibr" rid="B29">Mao and He, 2024</xref>; <xref ref-type="bibr" rid="B31">Moen et al., 2019</xref>; <xref ref-type="bibr" rid="B3">Christiansen et al., 2018</xref>).</p>
<p>Building on these advancements, Zhu et al. pioneered a deep learning-based approach specifically for NSC differentiation (<xref ref-type="bibr" rid="B46">Zhu et al., 2021</xref>). Their work established that label-free brightfield microscopy images contain sufficient morphological information to distinguish between neuronal and glial lineages without chemical staining. This validated the feasibility of using deep learning as a non-invasive alternative to biological assays. However, current state-of-the-art approaches primarily rely on standard CNN backbones such as ResNet (<xref ref-type="bibr" rid="B17">He et al., 2016</xref>) or Xception. While effective at extracting local texture features, these architectures employ fixed receptive fields that often struggle to capture long-range morphological dependencies&#x2014;such as the correlation between the soma and distant neurite extensions&#x2014;which are essential for distinguishing subtle phenotypes (<xref ref-type="bibr" rid="B38">Vaswani et al., 2017</xref>; <xref ref-type="bibr" rid="B14">Fujitani et al., 2017</xref>). Furthermore, standard convolutions lack explicit mechanisms to suppress the background noise and cellular debris inherent in label-free brightfield imaging, potentially compromising classification accuracy in complex culture environments (<xref ref-type="bibr" rid="B42">Yang et al., 2025</xref>).</p>
<p>To address these limitations, we propose CG-RecNet, a specialized deep learning framework engineered to enhance the predictive precision of NSC differentiation using high-throughput imaging flow cytometry. Built upon a ResNet50 backbone, our architecture integrates a LinAngular Cross-Channel Attention (LinAngular-XCA) Fusion Module (<xref ref-type="bibr" rid="B45">Zhou et al., 2023b</xref>) to explicitly model global semantic dependencies and a Gated Convolutional Block to robustly suppress background noise while refining local feature extraction. While IFC simplifies the requirement for cell segmentation compared to tissue microscopy, the challenge remains in distinguishing subtle, fine-grained phenotypic differences in low-contrast brightfield images. By synergistically processing both global context and local texture details, our model aims to overcome the inductive bias limitations of conventional CNNs. We comprehensively validate our approach on established datasets and provide interpretability via heatmap visualizations, offering a reliable and transparent tool for accelerating NSC research and therapeutic development.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Proposed methodology</title>
<sec id="s2-1">
<label>2.1</label>
<title>Overview of the proposed framework</title>
<p>The workflow of the CG-RecNet system is illustrated in <xref ref-type="fig" rid="F1">Figure 1</xref>. The framework consists of four stages: (1) Data Acquisition, where brightfield images are collected during NSC differentiation; (2) Preprocessing, including geometric transformations and normalization to enhance model generalization; (3) Model Training, where the CG-RecNet architecture&#x2014;leveraging ResNet50-based feature extraction, LinAngular-XCA, and Gated CNNs&#x2014;classifies the three neural cell types; and (4) Interpretation, employing Grad-CAM to visualize decision regions.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Workflow of the CG-RecNet framework. <bold>(A)</bold> Data Collection: Acquisition of brightfield NSC images. <bold>(B)</bold> Model Architecture: ResNet50 augmented with LinAngular-XCA and GatedCNN. <bold>(C)</bold> Model Explanation: Grad-CAM interpretability and quantitative evaluation.</p>
</caption>
<graphic xlink:href="fcell-14-1767574-g001.tif">
<alt-text content-type="machine-generated">Three-panel scientific diagram details a machine learning workflow for neural image analysis. Panel A illustrates data collection from stem cells into neurons, astrocytes, and oligodendrocytes with raw image data samples. Panel B shows a neural network architecture and a training versus validation accuracy plot. Panel C displays model explanation using neural network visualization, Grad-CAM heatmaps on cell images, an ROC curve, and a confusion matrix.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s2-2">
<label>2.2</label>
<title>Data Acquisition and class definition</title>
<p>The experimental dataset utilized in this study is derived from the &#x201c;Deep learning-based predictive identification of neural stem cell differentiation&#x201d; database, constructed by Zhu et al. and publicly available on the Figshare platform. This dataset comprises a substantial collection of single-cell images acquired via high-throughput imaging flow cytometry, with samples originating from primary Neural Stem Cells (NSCs) derived from embryonic Sprague-Dawley (SD) rats.</p>
<p>The categorization of samples into three distinct classes&#x2014;astrocytes (Class 0), oligodendrocytes (Class 1), and neurons (Class 2)&#x2014;reflects a classification logic based on a lineage-specific differentiation strategy, rather than mere morphological clustering. This classification paradigm aligns with the fundamental principles of developmental neurobiology, wherein pluripotent NSCs undergo directed differentiation toward specific fates guided by distinct environmental cues. For instance, as detailed in <xref ref-type="table" rid="T1">Table 1</xref>, neurons (Class 2) are defined through specific induction utilizing agents such as Retinoic Acid (RA) and Sonic Hedgehog (SHH); this category constitutes the largest subset (124,403 samples). In contrast, the glial lineage is represented by astrocytes (55,466 samples) and oligodendrocytes (27,687 samples), which were induced by their respective differentiation media. Although this strategy results in class imbalance, we explicitly retained this original distribution to evaluate the model&#x2019;s capacity to identify subtle, biologically dependent feature representations defined by the source benchmark under varying sample densities.</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Dataset composition and label mapping.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Class id</th>
<th align="center">Proposed label</th>
<th align="center">Original folder name</th>
<th align="center">Sample count</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">0</td>
<td align="center">Astrocytes</td>
<td align="center">NSCs treated with astrocyte differentiation medium</td>
<td align="center">55,466</td>
</tr>
<tr>
<td align="center">1</td>
<td align="center">Oligodendrocytes</td>
<td align="center">NSCs treated with oligodendrocyte differentiation medium</td>
<td align="center">27,687</td>
</tr>
<tr>
<td align="center">2</td>
<td align="center">Neurons</td>
<td align="center">NSCs treated with neuron differentiation medium (with retinoic acid (RA) and sonic hedgehog (SHH), etc.)</td>
<td align="center">124,403</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>To facilitate the development of a non-invasive classification framework, this study utilized images from the Brightfield channel (Ch1). While the source database included corresponding fluorescence channels&#x2014;AF488-GFAP, PE-Oligo2, and NeuN-APC&#x2014;which established the biological Ground Truth, processing in this study was conducted based on the brightfield modality to evaluate classification performance relying on intrinsic morphological features. For data integrity, the original folder structure provided by the dataset curators was maintained. <xref ref-type="table" rid="T1">Table 1</xref> details the correspondence among the proposed Class IDs, the directory names for specific induction treatments, and the sample size for each category. <xref ref-type="fig" rid="F2">Figure 2</xref> provides a visualization of representative brightfield images for each of the three categories. Additionally, <xref ref-type="fig" rid="F3">Figure 3</xref> illustrates representative multi-channel images, displaying the Brightfield channel (Ch1) alongside the fluorescence channels (Ch2, Ch3, and Ch11) used as reference.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Visualization of representative samples from the dataset. The images are arranged sequentially from 0 to 2, corresponding to the three categories of neural cells: Astrocytes (0), Oligodendrocytes (1), and Neurons (2). All images were acquired using the Brightfield channel (Ch1) of the imaging flow cytometer. Scale bar &#x3d; 10&#xa0;&#x3bc;m.</p>
</caption>
<graphic xlink:href="fcell-14-1767574-g002.tif">
<alt-text content-type="machine-generated">Microscopy images arranged in a three-by-three grid show three examples each of astrocytes in the left column, oligodendrocytes in the middle column, and neurons in the right column. Each cell type is labeled with its name and a corresponding number below the grid.</alt-text>
</graphic>
</fig>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Representative multi-channel images acquired via imaging flow cytometry. The figure displays the label-free Brightfield channel (Ch1) alongside the corresponding fluorescence channels used as biological ground truth references: Ch2 (AF488-GFAP), Ch3 (PE-Oligo2), and Ch11 (NeuN-APC). Scale bar &#x3d; 10&#xa0;&#x3bc;m.</p>
</caption>
<graphic xlink:href="fcell-14-1767574-g003.tif">
<alt-text content-type="machine-generated">Four-panel scientific image showing three circular cell-like objects observed across four channels labeled Ch1, Ch2, Ch3, and Ch11; fluorescence signal intensity visibly decreases from Ch2 to Ch11, with Ch1 displaying the structural outline.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s2-3">
<label>2.3</label>
<title>Data preprocessing</title>
<p>To ensure optimal input quality for the deep learning framework and to enhance model robustness against cellular morphological variations, this study implemented a systematic preprocessing pipeline applied to all raw brightfield single-cell images. This pipeline comprises three key stages: geometric standardization, statistical normalization, and random data augmentation.</p>
<p>First, to standardize the spatial dimensions required by the ResNet50 backbone, we implemented a consistent resizing strategy. For the validation and test sets, images were first resized to 256 pixels along the short edge, followed by a Center Crop to extract a unified 224 &#xd7; 224 pixel region of interest (ROI). This deterministic processing ensures that evaluation metrics reflect the model&#x2019;s recognition performance on the most salient cellular features without introducing artificial geometric distortions.</p>
<p>Subsequently, to facilitate stable convergence during gradient descent optimization, pixel intensity values were converted into floating-point tensors within the range [0, 1]. Next, channel-wise Z-score normalization was applied using ImageNet dataset statistics (mean <inline-formula id="inf1">
<mml:math id="m1">
<mml:mrow>
<mml:mi>&#x3bc;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; [0.485, 0.456, 0.406] and standard deviation <inline-formula id="inf2">
<mml:math id="m2">
<mml:mrow>
<mml:mi mathvariant="normal">&#x3c3;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; [0.229, 0.224, 0.225]). This step standardizes the input distribution, aligning it with the distribution of the backbone network&#x2019;s pretrained weights.</p>
<p>Finally, to mitigate overfitting and improve generalization capability regarding different cellular orientations, data augmentation was performed exclusively on the training dataset. The augmentation strategy incorporated Random Resized Cropping, which randomly samples crop regions from the original images and resizes them to 224 &#xd7; 224 pixels, thereby simulating variations in cellular scale and imaging focus. Additionally, random horizontal flipping with a probability of 0.5 was applied to accommodate the rotational invariance inherent in suspension cell imaging (<xref ref-type="bibr" rid="B33">Shorten and Khoshgoftaar, 2019</xref>). Regarding dataset gating, raw IFC data were pre-gated based on area and aspect ratio to remove debris and doublets, ensuring that the majority of inputs represented single cells. However, minor physical aggregates may remain, reflecting real-world high-throughput screening conditions. Crucially, to strictly prevent data leakage and ensure an unbiased evaluation of the model&#x2019;s diagnostic capabilities, no random augmentation techniques were applied to the validation or test sets; they were strictly maintained in a standardized, deterministic state (<xref ref-type="bibr" rid="B37">Varma and Simon, 2006</xref>).</p>
<p>
<statement content-type="algorithm" id="Algorithm_1">
<label>Algorithm 1</label>
<title>Preprocessing and Augmentation Pipeline for Neural Cell Images.</title>
<p>
<list list-type="simple">
<list-item>
<p>
<bold>Input:</bold> Raw brightfield single-cell dataset <inline-formula id="inf3">
<mml:math id="m3">
<mml:mrow>
<mml:mi mathvariant="normal">D</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mrow>
<mml:mfenced open="{" close="}" separators="|">
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">I</mml:mi>
<mml:mi mathvariant="normal">i</mml:mi>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi mathvariant="normal">y</mml:mi>
<mml:mi mathvariant="normal">i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</p>
</list-item>
<list-item>
<p>
<bold>Output:</bold> Augmented Training Set <inline-formula id="inf4">
<mml:math id="m4">
<mml:mrow>
<mml:msubsup>
<mml:mi mathvariant="normal">D</mml:mi>
<mml:mtext>train</mml:mtext>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula>, Standardized Validation Set <inline-formula id="inf5">
<mml:math id="m5">
<mml:mrow>
<mml:msubsup>
<mml:mi mathvariant="normal">D</mml:mi>
<mml:mtext>val</mml:mtext>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula>, and Test Set <inline-formula id="inf6">
<mml:math id="m6">
<mml:mrow>
<mml:msubsup>
<mml:mi mathvariant="normal">D</mml:mi>
<mml:mtext>test</mml:mtext>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula>
</p>
</list-item>
<list-item>
<p>
<bold>Begin</bold>
</p>
</list-item>
<list-item>
<p>&#x2003;Split <inline-formula id="inf7">
<mml:math id="m7">
<mml:mrow>
<mml:mi mathvariant="normal">D</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> into <inline-formula id="inf8">
<mml:math id="m8">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">D</mml:mi>
<mml:mtext>train</mml:mtext>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf9">
<mml:math id="m9">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">D</mml:mi>
<mml:mtext>val</mml:mtext>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf10">
<mml:math id="m10">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">D</mml:mi>
<mml:mtext>test</mml:mtext>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</p>
</list-item>
<list-item>
<p>
<bold>Function</bold> Preprocess(Image <inline-formula id="inf11">
<mml:math id="m11">
<mml:mrow>
<mml:mi mathvariant="normal">I</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>):</p>
</list-item>
<list-item>
<p>&#x2003;<inline-formula id="inf12">
<mml:math id="m12">
<mml:mrow>
<mml:mi mathvariant="normal">I</mml:mi>
<mml:mo>&#x2190;</mml:mo>
<mml:mtext>Resize</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi mathvariant="normal">I</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>256</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</p>
</list-item>
<list-item>
<p>&#x2003;<inline-formula id="inf13">
<mml:math id="m13">
<mml:mrow>
<mml:mi mathvariant="normal">I</mml:mi>
<mml:mo>&#x2190;</mml:mo>
<mml:mtext>CenterCrop</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi mathvariant="normal">I</mml:mi>
<mml:mo>,</mml:mo>
<mml:mn>224</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:mn>224</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</p>
</list-item>
<list-item>
<p>&#x2003;<inline-formula id="inf14">
<mml:math id="m14">
<mml:mrow>
<mml:mi mathvariant="normal">I</mml:mi>
<mml:mo>&#x2190;</mml:mo>
<mml:mtext>ToTensor</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi mathvariant="normal">I</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>/</mml:mo>
<mml:mn>255.0</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</p>
</list-item>
<list-item>
<p>&#x2003;<inline-formula id="inf15">
<mml:math id="m15">
<mml:mrow>
<mml:mi mathvariant="normal">I</mml:mi>
<mml:mo>&#x2190;</mml:mo>
<mml:mtext>Normalize</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi mathvariant="normal">I</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="normal">&#x3bc;</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="|">
<mml:mrow>
<mml:mn>0.485</mml:mn>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="normal">&#x3c3;</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="|">
<mml:mrow>
<mml:mn>0.229</mml:mn>
<mml:mo>,</mml:mo>
<mml:mo>&#x2026;</mml:mo>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</p>
</list-item>
<list-item>
<p>&#x2003;<inline-formula id="inf16">
<mml:math id="m16">
<mml:mrow>
<mml:mi mathvariant="normal">I</mml:mi>
<mml:mo>&#x2190;</mml:mo>
<mml:mtext>RandomHorizontalFlip</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi mathvariant="normal">I</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="normal">p</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.5</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</p>
</list-item>
<list-item>
<p>
<bold>Return</bold> <inline-formula id="inf17">
<mml:math id="m17">
<mml:mrow>
<mml:mi mathvariant="bold">I</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>
</p>
</list-item>
<list-item>
<p>
<bold>End Function</bold>
</p>
</list-item>
<list-item>
<p>
<bold>For</bold> each image <inline-formula id="inf18">
<mml:math id="m18">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">I</mml:mi>
<mml:mi mathvariant="normal">i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> in <inline-formula id="inf19">
<mml:math id="m19">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">D</mml:mi>
<mml:mtext>val</mml:mtext>
</mml:msub>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi mathvariant="normal">D</mml:mi>
<mml:mtext>test</mml:mtext>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> <bold>do</bold>:</p>
</list-item>
<list-item>
<p>&#x2003;<inline-formula id="inf20">
<mml:math id="m20">
<mml:mrow>
<mml:msubsup>
<mml:mi mathvariant="normal">I</mml:mi>
<mml:mi mathvariant="normal">i</mml:mi>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
<mml:mo>&#x2190;</mml:mo>
<mml:mtext>Preprocess</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">I</mml:mi>
<mml:mi mathvariant="normal">i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</p>
</list-item>
<list-item>
<p>
<bold>End For</bold>
</p>
</list-item>
<list-item>
<p>Apply stochastic augmentation for generalization</p>
</list-item>
<list-item>
<p>
<bold>For</bold> each image <inline-formula id="inf21">
<mml:math id="m21">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">I</mml:mi>
<mml:mi mathvariant="normal">j</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> in <inline-formula id="inf22">
<mml:math id="m22">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">D</mml:mi>
<mml:mtext>train</mml:mtext>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> <bold>do</bold>:</p>
</list-item>
<list-item>
<p>Apply stochastic augmentation for generalization</p>
</list-item>
<list-item>
<p>&#x2003;<inline-formula id="inf23">
<mml:math id="m23">
<mml:mrow>
<mml:msubsup>
<mml:mi mathvariant="normal">I</mml:mi>
<mml:mi mathvariant="normal">j</mml:mi>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
<mml:mo>&#x2190;</mml:mo>
<mml:mtext>TrainAugment</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">I</mml:mi>
<mml:mi mathvariant="normal">j</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</p>
</list-item>
<list-item>
<p>Add processed sample to <inline-formula id="inf24">
<mml:math id="m24">
<mml:mrow>
<mml:msubsup>
<mml:mi mathvariant="normal">D</mml:mi>
<mml:mtext>train</mml:mtext>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula>
</p>
</list-item>
<list-item>
<p>
<bold>End For</bold>
</p>
</list-item>
<list-item>
<p>
<bold>Return</bold> <inline-formula id="inf25">
<mml:math id="m25">
<mml:mrow>
<mml:msubsup>
<mml:mi mathvariant="normal">D</mml:mi>
<mml:mtext>train</mml:mtext>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
<mml:mo>,</mml:mo>
<mml:msubsup>
<mml:mi mathvariant="normal">D</mml:mi>
<mml:mtext>val</mml:mtext>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
<mml:mo>,</mml:mo>
<mml:msubsup>
<mml:mi mathvariant="normal">D</mml:mi>
<mml:mtext>test</mml:mtext>
<mml:mo>&#x2032;</mml:mo>
</mml:msubsup>
</mml:mrow>
</mml:math>
</inline-formula>
</p>
</list-item>
<list-item>
<p>
<bold>End</bold>
</p>
</list-item>
</list>
</p>
</statement>
</p>
</sec>
<sec id="s2-4">
<label>2.4</label>
<title>Proposed deep learning architecture and model details</title>
<p>To address the inherent limitations of standard Convolutional Neural Networks (CNNs) in capturing the multiscale morphological intricacies of differentiating Neural Stem Cells (NSCs), we propose a unified framework termed CG-RecNet. While traditional backbones such as ResNet50 excel at extracting hierarchical features, they rely heavily on local convolutions with fixed receptive fields, which often limits their ability to model global dependencies and effectively suppress background noise in label-free microscopy. As illustrated in <xref ref-type="fig" rid="F4">Figure 4</xref>, our framework advances the ResNet50 backbone by integrating two novel architectural components strategically placed after the second residual stage: the LinAngular Cross-Channel Attention (LinAngular-XCA) Fusion Module and the Gated Convolutional Neural Network (GatedCNN) Block (<xref ref-type="bibr" rid="B5">Dauphin et al., 2017</xref>; <xref ref-type="bibr" rid="B26">Liu et al., 2022</xref>). Specifically, the LinAngular-XCA module is designed by synergistically integrating Linear-complexity Angular Attention (<xref ref-type="bibr" rid="B45">Zhou et al., 2023b</xref>) and Cross-Covariance Attention (XCA) (<xref ref-type="bibr" rid="B9">El-Nouby et al., 2021</xref>) through a cross-fusion framework inspired by recent hybrid attention architectures (<xref ref-type="bibr" rid="B44">Zhou et al., 2023a</xref>). This strategic placement ensures that the model captures both high-resolution spatial features and long-range semantic dependencies early in the feature extraction process.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Overview of the Proposed Model Architecture. <bold>(A)</bold> The CG-RecNet framework, augmenting ResNet50 with LinAngular-XCA Fusion and Gated CNN Block. <bold>(B)</bold> LinAngular-XCA Fusion Module for global morphological modeling. <bold>(C)</bold> Gated CNN Block for noise suppression and local feature refinement.</p>
</caption>
<graphic xlink:href="fcell-14-1767574-g004.tif">
<alt-text content-type="machine-generated">Diagram illustrating a deep learning model architecture for image analysis, consisting of three main sections: (A) a sequential flowchart of convolutional and residual blocks with LinAngular-XCA Fusion and GatedCNN modules, (B) detailed structures of channel and spatial attention modules, and (C) a block diagram showing data flow through normalization, convolutions, and gating mechanisms.</alt-text>
</graphic>
</fig>
<p>To rigorously evaluate the contribution of each component, we define two intermediate model variants: ResCMNet, which incorporates only the attention fusion mechanism to enhance global context, and ResGDNet, which utilizes only the gated convolution to refine feature selection. The final CG-RecNet synergistically combines both modules to achieve reliable lineage prediction.</p>
<sec id="s2-4-1">
<label>2.4.1</label>
<title>LinAngular-XCA Fusion Module (ResCMNet)</title>
<p>The accurate discrimination of cell fates relies on distinguishing subtle membrane textures and capturing long-range morphological dependencies, such as the correlation between nuclear elongation and distant neurite outgrowth. Standard convolutions, limited by local receptive fields, often fail to model these global interactions. To address this, we introduce the LinAngular-XCA Fusion Module (<xref ref-type="fig" rid="F4">Figure 4B</xref>). Inspired by the dual-branch paradigm of the Convolutional Block Attention Module (CBAM) (<xref ref-type="bibr" rid="B40">Woo et al., 2018</xref>), our design advances this concept by integrating two specialized mechanisms&#x2014;LinAngular Attention for spatial dependencies and Cross-Covariance Attention for channel interactions&#x2014;via a cross-fusion strategy (<xref ref-type="bibr" rid="B44">Zhou et al., 2023a</xref>).</p>
<p>To capture global spatial contexts without the quadratic computational complexity (<inline-formula id="inf26">
<mml:math id="m26">
<mml:mrow>
<mml:mi mathvariant="normal">O</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msup>
<mml:mi mathvariant="normal">N</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>) of standard self-attention, we incorporate the LinAngular Attention mechanism. Unlike conventional dot-product attention, this component exploits the associativity of matrix multiplication to achieve linear complexity with respect to sequence length. The input feature map <inline-formula id="inf27">
<mml:math id="m27">
<mml:mrow>
<mml:mi mathvariant="normal">X</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is projected into query (<inline-formula id="inf28">
<mml:math id="m28">
<mml:mrow>
<mml:mi mathvariant="normal">Q</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>), key (<inline-formula id="inf29">
<mml:math id="m29">
<mml:mrow>
<mml:mi mathvariant="normal">K</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>), and value (<inline-formula id="inf30">
<mml:math id="m30">
<mml:mrow>
<mml:mi mathvariant="normal">V</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>) embeddings. To ensure numerical stability and consistent feature magnitude during the linear approximation, we introduce specific normalization terms. The computation is formally expressed as <xref ref-type="disp-formula" rid="e1">Equation 1</xref>:<disp-formula id="e1">
<mml:math id="m31">
<mml:mrow>
<mml:mtext>LinAngular</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi mathvariant="normal">X</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mi mathvariant="script">N</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="normal">&#x3c0;</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mi mathvariant="normal">Q</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msup>
<mml:mi mathvariant="normal">K</mml:mi>
<mml:mi mathvariant="normal">T</mml:mi>
</mml:msup>
<mml:mi mathvariant="normal">V</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:mn>0.5</mml:mn>
<mml:mi mathvariant="normal">V</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:math>
<label>(1)</label>
</disp-formula>where <inline-formula id="inf31">
<mml:math id="m32">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>/</mml:mo>
<mml:mi mathvariant="normal">&#x3c0;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> serves as an empirical angular scaling factor to stabilize gradient flow, <inline-formula id="inf32">
<mml:math id="m33">
<mml:mrow>
<mml:mi mathvariant="script">N</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> denotes Layer Normalization, and the computation order <inline-formula id="inf33">
<mml:math id="m34">
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msup>
<mml:mi mathvariant="normal">K</mml:mi>
<mml:mo>&#x22a4;</mml:mo>
</mml:msup>
<mml:mi mathvariant="normal">V</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</inline-formula> reduces the complexity to linear scale.</p>
<p>Complementary to spatial processing, we incorporate a Cross-Covariance Attention (XCA) branch to explicitly model global interactions between feature channels (<xref ref-type="bibr" rid="B9">El-Nouby et al., 2021</xref>). This process generates a global covariance map by applying attention operations along the channel dimension rather than the spatial dimension, defined as <xref ref-type="disp-formula" rid="e2">Equation 2</xref>:<disp-formula id="e2">
<mml:math id="m35">
<mml:mrow>
<mml:mtext>XCA</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi mathvariant="normal">Q</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="normal">K</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="normal">V</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mi mathvariant="normal">V</mml:mi>
<mml:mo>&#xb7;</mml:mo>
<mml:mtext>Softmax</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:msup>
<mml:mi mathvariant="normal">Q</mml:mi>
<mml:mi mathvariant="normal">T</mml:mi>
</mml:msup>
<mml:mi mathvariant="normal">K</mml:mi>
</mml:mrow>
<mml:mi mathvariant="normal">&#x3c4;</mml:mi>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:math>
<label>(2)</label>
</disp-formula>where <inline-formula id="inf34">
<mml:math id="m36">
<mml:mrow>
<mml:mi mathvariant="normal">&#x3c4;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is a learnable temperature parameter that scales the inner product, effectively highlighting co-activated feature channels associated with specific lineage markers.</p>
<p>Distinct from approaches that apply these maps sequentially, we implement a Cross-Attention Fusion strategy adapted from <xref ref-type="bibr" rid="B44">Zhou et al. (2023a)</xref>. This submodule projects refined features from both branches to compute a cross-covariance matrix, recalibrating the spatial focus of the LinAngular branch with the channel-wise context of the XCA branch. The final fused output is computed as <xref ref-type="disp-formula" rid="e3">Equation 3</xref>:<disp-formula id="e3">
<mml:math id="m37">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">F</mml:mi>
<mml:mtext>out</mml:mtext>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi mathvariant="normal">F</mml:mi>
<mml:mtext>LA</mml:mtext>
</mml:msub>
<mml:mo>&#x2297;</mml:mo>
<mml:mi mathvariant="normal">&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi mathvariant="script">G</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">F</mml:mi>
<mml:mtext>XCA</mml:mtext>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mi mathvariant="normal">F</mml:mi>
<mml:mtext>XCA</mml:mtext>
</mml:msub>
<mml:mo>&#x2297;</mml:mo>
<mml:mi mathvariant="normal">&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi mathvariant="script">G</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">F</mml:mi>
<mml:mtext>LA</mml:mtext>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
<label>(3)</label>
</disp-formula>where <inline-formula id="inf35">
<mml:math id="m38">
<mml:mrow>
<mml:mi mathvariant="script">G</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> represents the global context interaction operation, <inline-formula id="inf36">
<mml:math id="m39">
<mml:mrow>
<mml:mi mathvariant="normal">&#x3c3;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> denotes the Sigmoid activation function, and <inline-formula id="inf37">
<mml:math id="m40">
<mml:mrow>
<mml:mo>&#x2297;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> indicates element-wise multiplication. By integrating this module, the network outputs a recalibrated feature map where lineage-specific characteristics are significantly enhanced through global context modeling.</p>
</sec>
<sec id="s2-4-2">
<label>2.4.2</label>
<title>Gated Convolutional Block (ResGDNet)</title>
<p>Following feature extraction, it is imperative to selectively propagate biologically relevant information while dampening noise, particularly given the low contrast and floating debris typical of label-free brightfield microscopy. To achieve this, we incorporate the Gated CNN Block (<xref ref-type="fig" rid="F4">Figure 4C</xref>). This design draws inspiration from the Gated Linear Units (GLU) originally proposed for language modeling by <xref ref-type="bibr" rid="B5">Dauphin et al. (2017)</xref>, but we adapt it here for 2D visual feature maps within an inverted bottleneck architecture (<xref ref-type="bibr" rid="B26">Liu et al., 2022</xref>).</p>
<p>As depicted in <xref ref-type="fig" rid="F4">Figure 4C</xref>, the input tensor undergoes Layer Normalization to stabilize training dynamics before being projected into a higher-dimensional space via a fully connected (FC1) layer. The flow is then bifurcated into two parallel paths: a content path and a gating path. The gating path acts as a learnable filter, utilizing a projection layer followed by a Gaussian Error Linear Unit (GELU) activation.</p>
<p>The critical operation is the element-wise multiplication of the content path by the gating path. This mechanism enables the network to learn a dynamic feature selection policy: the gate &#x201c;opens&#x201d; (values approaching 1) for features strongly correlated with differentiation markers&#x2014;such as the branching patterns of oligodendrocytes&#x2014;and &#x201c;closes&#x201d; (values approaching 0) for ambiguous background regions. This selective mechanism functionally mimics the noise-suppression capability of the biological visual system. Formally, given an input feature map <inline-formula id="inf38">
<mml:math id="m41">
<mml:mrow>
<mml:mi mathvariant="normal">X</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>, the output <inline-formula id="inf39">
<mml:math id="m42">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">h</mml:mi>
<mml:mi mathvariant="normal">l</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> of the block is defined as <xref ref-type="disp-formula" rid="e4">Equation 4</xref>:<disp-formula id="e4">
<mml:math id="m43">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">h</mml:mi>
<mml:mi mathvariant="normal">l</mml:mi>
</mml:msub>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi mathvariant="normal">X</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi mathvariant="normal">X</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mo>&#x2a;</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mi mathvariant="normal">W</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi mathvariant="normal">b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2297;</mml:mo>
<mml:mi mathvariant="normal">&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi mathvariant="normal">X</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mo>&#x2a;</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mi mathvariant="normal">V</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi mathvariant="normal">c</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:math>
<label>(4)</label>
</disp-formula>where <inline-formula id="inf40">
<mml:math id="m44">
<mml:mrow>
<mml:mi mathvariant="normal">W</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf41">
<mml:math id="m45">
<mml:mrow>
<mml:mi mathvariant="normal">V</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> are learned kernels for the linear transmission and gating operations, respectively, and <inline-formula id="inf42">
<mml:math id="m46">
<mml:mrow>
<mml:mo>&#x2297;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> denotes element-wise multiplication (<xref ref-type="bibr" rid="B5">Dauphin et al., 2017</xref>).</p>
<p>In our implementation, to optimize parameter efficiency, we incorporate depthwise convolution within the gating mechanism. The input is partitioned into three components: gate (<inline-formula id="inf43">
<mml:math id="m47">
<mml:mrow>
<mml:mi mathvariant="normal">g</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>), information (<inline-formula id="inf44">
<mml:math id="m48">
<mml:mrow>
<mml:mi mathvariant="normal">i</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>), and context (<inline-formula id="inf45">
<mml:math id="m49">
<mml:mrow>
<mml:mi mathvariant="normal">c</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>). The context component undergoes depthwise convolution, applying a single filter to each input channel (<xref ref-type="disp-formula" rid="e5">Equation 5</xref>):<disp-formula id="e5">
<mml:math id="m50">
<mml:mrow>
<mml:mtext>DepthwiseConv</mml:mtext>
<mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi mathvariant="normal">F</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="normal">i</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="normal">j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mstyle displaystyle="true">
<mml:munder>
<mml:mo>&#x2211;</mml:mo>
<mml:mrow>
<mml:mi mathvariant="normal">k</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="normal">l</mml:mi>
</mml:mrow>
</mml:munder>
</mml:mstyle>
<mml:msub>
<mml:mi mathvariant="normal">K</mml:mi>
<mml:mrow>
<mml:mi mathvariant="normal">k</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="normal">l</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#xb7;</mml:mo>
<mml:msub>
<mml:mi mathvariant="normal">F</mml:mi>
<mml:mrow>
<mml:mi mathvariant="normal">i</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi mathvariant="normal">k</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="normal">j</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi mathvariant="normal">l</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
</mml:mrow>
</mml:math>
<label>(5)</label>
</disp-formula>where <inline-formula id="inf46">
<mml:math id="m51">
<mml:mrow>
<mml:mi mathvariant="normal">F</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> represents the input feature map and <inline-formula id="inf47">
<mml:math id="m52">
<mml:mrow>
<mml:mi mathvariant="normal">K</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the depthwise kernel. By integrating the gating mechanism with depthwise convolution, the model achieves a favorable balance between computational complexity and predictive performance. The gating operation <inline-formula id="inf48">
<mml:math id="m53">
<mml:mrow>
<mml:mi mathvariant="normal">&#x3c3;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi mathvariant="normal">X</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mo>&#x2a;</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mi mathvariant="normal">V</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi mathvariant="normal">c</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> allows the network to suppress irrelevant background artifacts while retaining high-frequency texture details, effectively functioning as a &#x201c;soft&#x201d; attention mechanism. Finally, a residual connection is employed to ensure gradient stability during deep network training.</p>
</sec>
<sec id="s2-4-3">
<label>2.4.3</label>
<title>CG-RecNet: integrated model architecture</title>
<p>The CG-RecNet represents the final, integrated model architecture, built upon the hierarchical feature extraction of the ResNet50 backbone to address the dual challenges of fine-grained morphological classification and noise suppression in label-free imaging. The structure is strategically designed to optimize feature flow by sequentially applying global context modeling and local feature refinement within the core network.</p>
<p>Specifically, the LinAngular-XCA Fusion Module is introduced directly following ResBlock Stage 2. This placement ensures that the feature maps, having acquired reliable hierarchical representations from the initial convolutional stages, are subjected to Global Recalibration. By explicitly modeling long-range spatial and cross-channel dependencies at this mid-level stage, the model gains a comprehensive contextual understanding of the cell morphology, overcoming the local limitations of the early residual layers.</p>
<p>Subsequently, the Gated CNN Block is inserted immediately after the attention module, serving as a critical Local Refinement mechanism. Its function as a learnable filter is to selectively modulate the information flow before features pass into the deeper ResBlock Stage 3 and Stage 4. This sequential placement&#x2014;hierarchical extraction <inline-formula id="inf49">
<mml:math id="m54">
<mml:mrow>
<mml:mo>&#x2192;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> global context recalibration <inline-formula id="inf50">
<mml:math id="m55">
<mml:mrow>
<mml:mo>&#x2192;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> noise-reliable local refinement&#x2014;ensures that only the most discriminative and noise-free features propagate to the final classification layers. This synergistic integration allows CG-RecNet to achieve reliable lineage prediction superior to its ablation variants.</p>
</sec>
<sec id="s2-4-4">
<label>2.4.4</label>
<title>Comparative models</title>
<p>To validate the specific contributions of our proposed architecture, we established a systematic comparison ranging from the ResNet50 baseline (<xref ref-type="bibr" rid="B17">He et al., 2016</xref>), which is limited by fixed receptive fields and standard convolutions, to two progressive variants: ResCMNet, which integrates the LinAngular-XCA Fusion Module to enhance global spatial focus but remains susceptible to noise, and ResGDNet, which employs Gated CNN Blocks to filter background artifacts and refine local textures. Building upon these findings, our proposed CG-RecNet synergistically combines both mechanisms, leveraging dual-branch attention for global context modeling and gated convolutions for noise suppression to address the dual challenges of structural analysis and fine-grained texture recognition, thereby achieving superior classification robustness in label-free stem cell imagery.</p>
<p>To benchmark our proposed method against broader architectural paradigms, we evaluated four state-of-the-art models: DenseNet (<xref ref-type="bibr" rid="B18">Huang et al., 2017</xref>), which excels in feature reuse via dense connectivity but suffers from high computational redundancy due to channel concatenation; VGG (<xref ref-type="bibr" rid="B34">Simonyan and Zisserman, 2015</xref>), a classic deep network that, despite reliable hierarchical extraction, is limited by excessive parameter volume (approx. 138M) and a lack of global context awareness; MobileNet V2 (<xref ref-type="bibr" rid="B32">Sandler et al., 2018</xref>), an efficient architecture renowned for its low computational cost and parameter count, achieved primarily through its inverted residual structure and linear bottlenecks, making it a strong benchmark for deployment efficiency; and Vision Transformer (ViT) (<xref ref-type="bibr" rid="B8">Dosovitskiy et al., 2021</xref>), which leverages self-attention for long-range dependencies but struggles with data efficiency and high-frequency local texture capture due to the absence of convolutional inductive biases.</p>
<p>Furthermore, to ensure a high degree of domain-specific relevance, our comparative suite critically includes the <xref ref-type="bibr" rid="B46">Zhu et al. (2021)</xref> Xception-based Model, which first validated the concept of label-free NSC differentiation prediction using the same underlying dataset. The Xception architecture, utilizing depthwise separable convolutions, represents the existing specialized benchmark for efficiency and performance in this domain. The inclusion of this direct competitor is essential to substantiate that CG-RecNet provides a significant architectural advance over the established methodology for accurate and noise-reliable classification of neural stem cell lineages.</p>
</sec>
</sec>
<sec id="s2-5">
<label>2.5</label>
<title>Model evaluation metric</title>
<p>The performance of CG-RecNet was evaluated using a comprehensive suite of standard metrics, including Accuracy, Precision, Recall, and the F1-Score, the latter of which provides a balanced assessment of class-imbalanced data. To evaluate discriminative capability across various thresholds, we utilized the Receiver Operating Characteristic (ROC) curve and calculated the macro-average Area Under the Curve (AUC). Statistical significance was assessed using a paired t-test on the 5-fold cross-validation results, with p &#x3c; 0.05 considered significant. Detailed mathematical formulations for these metrics are provided in the <xref ref-type="sec" rid="s13">Supplementary Material</xref>.</p>
</sec>
</sec>
<sec sec-type="results|discussion" id="s3">
<label>3</label>
<title>Results and discussion</title>
<sec id="s3-1">
<label>3.1</label>
<title>Experimental setup and data partitioning</title>
<p>To rigorously evaluate the model&#x2019;s generalization capability and prevent data leakage, we employed a stratified splitting strategy for dataset partitioning. This approach ensures that the class distribution across all subsets remains balanced while strictly segregating samples to preclude any overlap between the training and evaluation sets. Specifically, the dataset was partitioned to allocate 70% for model training, 15% for validation, and 15% as an independent test set. The validation set was utilized to monitor model convergence, facilitate hyperparameter tuning, and implement early stopping mechanisms. The resulting distribution structure comprises independent training, validation, and testing sets. It is crucial to clarify that while the final application of CG-RecNet is &#x201c;label-free&#x201d; (using only brightfield images for inference), the ground truth labels for training the Cross-Entropy Loss function were established using gold-standard immunofluorescent staining during dataset preparation. To ensure the reproducibility of our results, <xref ref-type="table" rid="T2">Table 2</xref> summarizes the detailed implementation parameters and training configurations. The proposed model was implemented using the PyTorch framework (version 1.12.0) and trained on a workstation equipped with a 13th Gen Intel Core i7-13620H CPU and an NVIDIA GeForce RTX 4050 Laptop GPU (6&#xa0;GB VRAM). We employed the Adam optimizer for parameter updates, selected for its efficacy in handling sparse gradients and adaptive learning rates. The training process was conducted over 100 epochs with a batch size of 8. Cross-Entropy was utilized as the loss function. To mitigate overfitting, we adopted the StepLR learning rate decay strategy, configured to decay the learning rate by a factor of 0.1 every 7 epochs.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Model hyperparameters and training configuration.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Parameter</th>
<th align="center">Configuration/Value</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">
<styled-content style="color:#1A1C1E">Input resolution</styled-content>
</td>
<td align="center">
<inline-formula id="inf51">
<mml:math id="m56">
<mml:mrow>
<mml:mn>224</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:mn>224</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> <styled-content style="color:#1A1C1E">pixels</styled-content>
</td>
</tr>
<tr>
<td align="center">
<styled-content style="color:#1A1C1E">Model architecture</styled-content>
</td>
<td align="center">
<styled-content style="color:#1A1C1E">ResNet50 &#x2b; LinAngular-XCA fusion &#x2b; gated CNN</styled-content>
</td>
</tr>
<tr>
<td align="center">
<styled-content style="color:#1A1C1E">Optimizer</styled-content>
</td>
<td align="center">
<styled-content style="color:#1A1C1E">Adam (</styled-content>
<inline-formula id="inf52">
<mml:math id="m57">
<mml:mrow>
<mml:msub>
<mml:mi mathvariant="normal">&#x3b2;</mml:mi>
<mml:mn>1</mml:mn>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.9</mml:mn>
<mml:mo>,</mml:mo>
<mml:msub>
<mml:mi mathvariant="normal">&#x3b2;</mml:mi>
<mml:mn>2</mml:mn>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.999</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
<styled-content style="color:#1A1C1E">; </styled-content>
<inline-formula id="inf53">
<mml:math id="m58">
<mml:mrow>
<mml:mi>&#x3f5;</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:msup>
<mml:mn>10</mml:mn>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>8</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>
<styled-content style="color:#1A1C1E">)</styled-content>
</td>
</tr>
<tr>
<td align="center">
<styled-content style="color:#1A1C1E">Initial learning rate</styled-content>
</td>
<td align="center">
<inline-formula id="inf54">
<mml:math id="m59">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:msup>
<mml:mn>10</mml:mn>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>4</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">
<styled-content style="color:#1A1C1E">Learning rate schedule</styled-content>
</td>
<td align="center">
<styled-content style="color:#1A1C1E">ReduceLROnPlateau (factor 0.1, patience 5)</styled-content>
</td>
</tr>
<tr>
<td align="center">
<styled-content style="color:#1A1C1E">Batch size</styled-content>
</td>
<td align="center">
<styled-content style="color:#1A1C1E">8</styled-content>
</td>
</tr>
<tr>
<td align="center">
<styled-content style="color:#1A1C1E">Total epochs</styled-content>
</td>
<td align="center">
<styled-content style="color:#1A1C1E">100</styled-content>
</td>
</tr>
<tr>
<td align="center">
<styled-content style="color:#1A1C1E">Loss function</styled-content>
</td>
<td align="center">
<styled-content style="color:#1A1C1E">Cross-entropy loss</styled-content>
</td>
</tr>
<tr>
<td align="center">
<styled-content style="color:#1A1C1E">Activation functions</styled-content>
</td>
<td align="center">
<styled-content style="color:#1A1C1E">ReLU (intermediate), softmax (final classification)</styled-content>
</td>
</tr>
<tr>
<td align="center">
<styled-content style="color:#1A1C1E">Hardware</styled-content>
</td>
<td align="center">
<styled-content style="color:#1A1C1E">Intel core i7-13620H/RTX 4050 (6&#xa0;GB)</styled-content>
</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>To ensure the reproducibility of our results, all experiments were conducted using multiple fixed random seeds (seeds &#x3d; 42, 123, and 999) to verify that the performance gains of CG-RecNet are not artifacts of hyperparameter selection. The reported p-values (p &#x3c; 0.001) confirm that the architectural improvements are statistically significant regardless of weight initialization.</p>
</sec>
<sec id="s3-2">
<label>3.2</label>
<title>Results of the model</title>
<sec id="s3-2-1">
<label>3.2.1</label>
<title>Results of the ablation study</title>
<p>To systematically evaluate the contribution of each proposed module&#x2014;specifically the GatedCNN module and the LinAngular-XCA fusion mechanism&#x2014;we conducted an ablation study. We compared the performance of the Baseline model (ResNet50), the model with only the GatedCNN module (ResGDNet), the model with only the LinAngular-XCA module (ResCMNet), and the proposed full framework (CG-RecNet). The quantitative results, presented as mean &#xb1; standard deviation across 5-fold cross-validation, are summarized in <xref ref-type="table" rid="T3">Table 3</xref>.</p>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>Results of the ablation study.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Model</th>
<th align="center">Accuracy</th>
<th align="center">Precision</th>
<th align="center">Recall</th>
<th align="center">F1</th>
<th align="center">Total parameters (M)</th>
<th align="center">GFLOPs</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">CG-RecNet</td>
<td align="center">
<inline-formula id="inf55">
<mml:math id="m60">
<mml:mrow>
<mml:mn mathvariant="bold">96.40</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn mathvariant="bold">0.07</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf56">
<mml:math id="m61">
<mml:mrow>
<mml:mn mathvariant="bold">96.38</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn mathvariant="bold">0.19</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf57">
<mml:math id="m62">
<mml:mrow>
<mml:mn mathvariant="bold">96.40</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn mathvariant="bold">0.06</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf58">
<mml:math id="m63">
<mml:mrow>
<mml:mn mathvariant="bold">96.39</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn mathvariant="bold">0.05</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<bold>28.02</bold>
</td>
<td align="center">
<bold>8.40</bold>
</td>
</tr>
<tr>
<td align="center">ResCMNet</td>
<td align="center">
<inline-formula id="inf59">
<mml:math id="m64">
<mml:mrow>
<mml:mn>96.32</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.11</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf60">
<mml:math id="m65">
<mml:mrow>
<mml:mn>96.29</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.15</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf61">
<mml:math id="m66">
<mml:mrow>
<mml:mn>96.32</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.09</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf62">
<mml:math id="m67">
<mml:mrow>
<mml:mn>96.30</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.07</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">27.69</td>
<td align="center">8.19</td>
</tr>
<tr>
<td align="center">ResGDNet</td>
<td align="center">
<inline-formula id="inf63">
<mml:math id="m68">
<mml:mrow>
<mml:mn>95.63</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.08</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf64">
<mml:math id="m69">
<mml:mrow>
<mml:mn>95.58</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.25</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf65">
<mml:math id="m70">
<mml:mrow>
<mml:mn>95.63</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.05</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf66">
<mml:math id="m71">
<mml:mrow>
<mml:mn>95.60</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.06</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">25.89</td>
<td align="center">7.73</td>
</tr>
<tr>
<td align="center">Baseline</td>
<td align="center">
<inline-formula id="inf67">
<mml:math id="m72">
<mml:mrow>
<mml:mn>94.58</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.08</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf68">
<mml:math id="m73">
<mml:mrow>
<mml:mn>94.50</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.19</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf69">
<mml:math id="m74">
<mml:mrow>
<mml:mn>94.58</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.12</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf70">
<mml:math id="m75">
<mml:mrow>
<mml:mn>94.47</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.07</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">25.51</td>
<td align="center">6.13</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>Bold values indicate the best performance metrics among the compared models.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>The visualization in <xref ref-type="fig" rid="F5">Figure 5</xref> provides an assessment of the CG-RecNet&#x2019;s stability and learning process across five distinct data partitions. (A) Illustrates the trajectory of the accuracy metric during the 5-fold cross-validation process. The bold solid lines denote the mean training and validation accuracies across the five folds, while the shaded regions represent the standard deviation. The thin lines in the background display the specific performance of each individual fold (Fold 1&#x2013;5), effectively reflecting the model&#x2019;s stability across distinct data partitions. (B) Presents the corresponding loss convergence curves. The consistent decline in both training and validation losses over the epochs indicates strong convergence capabilities, with no significant evidence of overfitting. (C) Provides a detailed training overview of Fold 5. As a representative fold, this plot further indicates the close alignment between training and validation metrics.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Five-Fold Cross-Validation Performance Metrics and Training Dynamics for the CG-RecNet Model. <bold>(A)</bold> Accuracy trajectory across all five folds. <bold>(B)</bold> Loss convergence curves. <bold>(C)</bold> Detailed training dynamics for a representative fold (Fold 5). <bold>(D)</bold> Statistical distribution of performance metrics.</p>
</caption>
<graphic xlink:href="fcell-14-1767574-g005.tif">
<alt-text content-type="machine-generated">Panel A shows a line graph of accuracy across epochs for five cross-validation folds and their mean, with accuracy stabilizing near one. Panel B presents a loss graph similarly structured, showing consistent low loss. Panel C displays a 3D bar plot of accuracy by fold and epoch, with color gradients from red to yellow indicating values. Panel D has two box plots comparing accuracy and F1-score for four models, with CG-RecNet consistently outperforming others; statistical significance is indicated above comparisons.</alt-text>
</graphic>
</fig>
<p>To rigorously validate the statistical reliability of these results, (D) presents the box plots of Accuracy and F1-Score distributions. Unlike the trajectories, this panel explicitly highlights the variance and median performance, with the Student&#x2019;s t-test results (<inline-formula id="inf71">
<mml:math id="m76">
<mml:mrow>
<mml:mi mathvariant="normal">p</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mo>&#x3c;</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mn>0.001</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) confirming a statistically significant improvement of CG-RecNet over the Baseline. Collectively, these indicators substantiate the model&#x2019;s reliable performance and generalization capability on the stem cell dataset.</p>
<p>
<xref ref-type="fig" rid="F6">Figure 6</xref> displays the ROC curves comparing the diagnostic performance of CG-RecNet, Baseline, ResGDNet, and ResCMNet architectures. The CG-RecNet curves are notably positioned closest to the top-left corner across all classes, indicating the model&#x2019;s superior capability to distinguish between different categories and to achieve the highest diagnostic accuracy (AUC) and model stability.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>Comparative Receiver Operating Characteristic (ROC) curves for Various Models.</p>
</caption>
<graphic xlink:href="fcell-14-1767574-g006.tif">
<alt-text content-type="machine-generated">Four-panel figure showing ROC curves comparing CG-RecNet, Baseline, ResGDNet, and ResCMNet models for classifying astrocytes, oligodendrocytes, and neurons. Each panel displays three class curves and corresponding micro- and macro-average AUCs, all with high AUC values near one, indicating strong model performance. False positive rate is on the x-axis and true positive rate is on the y-axis.</alt-text>
</graphic>
</fig>
<p>
<xref ref-type="fig" rid="F7">Figure 7</xref> displays the normalized confusion matrices for the CG-RecNet, Baseline, ResGDNet, and ResCMNet architectures. The x-axis represents the predicted labels, while the y-axis represents the true labels. Class labels are defined as: 0 &#x3d; Astro, 1 &#x3d; Oligo, and 2 &#x3d; Neuron. The matrix for CG-RecNet reveals a dense concentration of samples along the main diagonal, achieving 99% accuracy for Astrocytes, 89% for the minority Oligodendrocytes, and 97% for Neurons. This visualization demonstrates the model&#x2019;s high predictive precision and low inter-class confusion compared to the Baseline and ablation variants on the internal dataset.</p>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>Normalized confusion matrices.</p>
</caption>
<graphic xlink:href="fcell-14-1767574-g007.tif">
<alt-text content-type="machine-generated">Four confusion matrices compare classification accuracy for three cell types (Astro, Oligo, Neuron) across four models: CG-RecNet, Baseline, ResGDNet, and ResCMNet, showing CG-RecNet achieves the highest accuracy, particularly for Oligo. Each matrix displays true versus predicted labels, with color intensity indicating classification performance.</alt-text>
</graphic>
</fig>
<p>The diagnostic efficacy of CG-RecNet was further evaluated in relation to computational complexity to assess the trade-off between performance and resource cost. As detailed in <xref ref-type="table" rid="T3">Table 3</xref>, the integration of novel components led to progressive performance gains. The introduction of the attention mechanism (ResCMNet) resulted in a notable accuracy increase from the Baseline&#x2019;s 94.58% &#xb1; 0.08% to 96.32% &#xb1; 0.11%. Although this addition increased computational cost (from 6.13 GFLOPs to 8.19 GFLOPs) due to global context modeling, the subsequent integration of the Gated CNN Block in the final CG-RecNet framework achieved the peak accuracy of 96.40% &#xb1; 0.07%. Statistical analysis confirmed that the improvement over the Baseline was statistically significant (p &#x3c; 0.001).</p>
<p>Regarding model complexity, the final integrated architecture (CG-RecNet) utilizes 28.02&#xa0;M parameters and 8.40 GFLOPs. This 1.82% absolute improvement in accuracy is accompanied by a &#x223c;9.8% increase in model parameters (from 25.51&#xa0;M to 28.02&#xa0;M) and a &#x223c;37% increase in GFLOPs (from 6.13 to 8.40). While CG-RecNet is more efficient than older, high-parameter architectures like VGG-16, the baseline ResNet50 may offer a superior balance between predictive capability and computational resource consumption&#x2014;providing a better &#x2018;bang for the buck&#x2019;&#x2014;for users with limited hardware resources.</p>
</sec>
<sec id="s3-2-2">
<label>3.2.2</label>
<title>Results of the comparison experiments</title>
<p>To strictly evaluate the performance of the proposed method, we conducted comparative experiments against several mainstream deep learning models, including DenseNet (<xref ref-type="bibr" rid="B18">Huang et al., 2017</xref>), VGG (<xref ref-type="bibr" rid="B34">Simonyan and Zisserman, 2015</xref>), Vision Transformer (ViT) (<xref ref-type="bibr" rid="B8">Dosovitskiy et al., 2021</xref>) and MobileNet V2 (<xref ref-type="bibr" rid="B32">Sandler et al., 2018</xref>). The quantitative results are presented in <xref ref-type="table" rid="T3">Table 3</xref>, focusing on four key evaluation metrics: Accuracy, Precision, Recall, and F1-score.</p>
<p>As presented in <xref ref-type="table" rid="T4">Table 4</xref>, the proposed CG-RecNet achieved superior performance across all evaluation metrics, demonstrating its robustness in the classification task. Specifically, CG-RecNet attained the highest Accuracy of 96.40% &#xb1; 0.07%, Precision of 96.38% &#xb1; 0.19%, Recall of 96.40% &#xb1; 0.06%, and F1-score of 96.39% &#xb1; 0.05%. Notably, our model outperformed the second-best architecture, MobileNet V2, which recorded an accuracy of 96.07% &#xb1; 0.12% and an F1-score of 96.04% &#xb1; 0.09%. DenseNet and VGG followed closely with accuracies of 96.04% &#xb1; 0.14% and 95.82% &#xb1; 0.18%, respectively, whereas the ViT model yielded a comparatively lower accuracy of 94.27% &#xb1; 0.25%. These quantitative results&#x2014;highlighting both superior mean performance and lower variance&#x2014;substantiate the efficacy of the proposed architectural enhancements, confirming that CG-RecNet offers a highly reliable solution relative to established baselines.</p>
<table-wrap id="T4" position="float">
<label>TABLE 4</label>
<caption>
<p>Results of the comparison experiments.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Model</th>
<th align="center">Accuracy</th>
<th align="center">Precision</th>
<th align="center">Recall</th>
<th align="center">F1</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">CG-RecNet</td>
<td align="center">
<inline-formula id="inf72">
<mml:math id="m77">
<mml:mrow>
<mml:mn mathvariant="bold">96.40</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn mathvariant="bold">0.07</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf73">
<mml:math id="m78">
<mml:mrow>
<mml:mn mathvariant="bold">96.38</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn mathvariant="bold">0.19</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf74">
<mml:math id="m79">
<mml:mrow>
<mml:mn mathvariant="bold">96.40</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn mathvariant="bold">0.06</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf75">
<mml:math id="m80">
<mml:mrow>
<mml:mn mathvariant="bold">96.39</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn mathvariant="bold">0.05</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">MobileNet V2</td>
<td align="center">
<inline-formula id="inf76">
<mml:math id="m81">
<mml:mrow>
<mml:mn>96.07</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.12</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf77">
<mml:math id="m82">
<mml:mrow>
<mml:mn>96.03</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.15</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf78">
<mml:math id="m83">
<mml:mrow>
<mml:mn>96.07</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.10</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf79">
<mml:math id="m84">
<mml:mrow>
<mml:mn>96.04</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.09</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">DenseNet</td>
<td align="center">
<inline-formula id="inf80">
<mml:math id="m85">
<mml:mrow>
<mml:mn>96.04</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.14</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf81">
<mml:math id="m86">
<mml:mrow>
<mml:mn>96.01</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.18</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf82">
<mml:math id="m87">
<mml:mrow>
<mml:mn>96.04</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.11</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf83">
<mml:math id="m88">
<mml:mrow>
<mml:mn>96.02</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.10</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">VGG</td>
<td align="center">
<inline-formula id="inf84">
<mml:math id="m89">
<mml:mrow>
<mml:mn>95.82</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.18</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf85">
<mml:math id="m90">
<mml:mrow>
<mml:mn>95.77</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.22</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf86">
<mml:math id="m91">
<mml:mrow>
<mml:mn>95.82</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.15</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf87">
<mml:math id="m92">
<mml:mrow>
<mml:mn>95.78</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.14</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">ViT</td>
<td align="center">
<inline-formula id="inf88">
<mml:math id="m93">
<mml:mrow>
<mml:mn>94.27</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.25</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf89">
<mml:math id="m94">
<mml:mrow>
<mml:mn>94.17</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.30</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf90">
<mml:math id="m95">
<mml:mrow>
<mml:mn>94.27</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.22</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf91">
<mml:math id="m96">
<mml:mrow>
<mml:mn>94.18</mml:mn>
<mml:mo>%</mml:mo>
<mml:mo>&#xb1;</mml:mo>
<mml:mn>0.21</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>Bold values indicate the best performance metrics among the compared models.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>While the overall accuracy improvement is incremental compared to MobileNet V2 (96.07% &#xb1; 0.12%), a more distinct advantage is observed in lineage-specific performance. To provide a more comprehensive assessment, <xref ref-type="table" rid="T5">Table 5</xref> compares the per-class efficacy of CG-RecNet against both the baseline and all other SOTA architectures. As demonstrated, the superior performance of CG-RecNet is underscored by its ability to consistently outperform all competitive models across every neural lineage.</p>
<table-wrap id="T5" position="float">
<label>TABLE 5</label>
<caption>
<p>Per-class diagnostic performance and lineage classification efficacy.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Class id</th>
<th align="center">Cell type</th>
<th align="center">Support</th>
<th align="center">ViT</th>
<th align="center">VGG</th>
<th align="center">DenseNet</th>
<th align="center">MobileNet V2</th>
<th align="center">Baseline</th>
<th align="center">CG-RecNet</th>
<th align="center">CG-RecNet improvement</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">0</td>
<td align="center">Astrocytes</td>
<td align="center">8,320</td>
<td align="center">95.80%</td>
<td align="center">97.82%</td>
<td align="center">98.13%</td>
<td align="center">98.15%</td>
<td align="center">97.40%</td>
<td align="center">98.34%</td>
<td align="center">
<bold>&#x2b;0.94%</bold>
</td>
</tr>
<tr>
<td align="center">1</td>
<td align="center">Oligodendrocytes</td>
<td align="center">4,153</td>
<td align="center">81.15%</td>
<td align="center">85.12%</td>
<td align="center">86.45%</td>
<td align="center">87.20%</td>
<td align="center">83.81%</td>
<td align="center">89.75%</td>
<td align="center">
<bold>&#x2b;5.94%</bold>
</td>
</tr>
<tr>
<td align="center">2</td>
<td align="center">Neurons</td>
<td align="center">18,661</td>
<td align="center">94.60%</td>
<td align="center">96.10%</td>
<td align="center">96.33%</td>
<td align="center">96.55%</td>
<td align="center">95.53%</td>
<td align="center">96.99%</td>
<td align="center">
<bold>&#x2b;1.46%</bold>
</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>Bold values indicate the best performance metrics among the compared models.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>Notably, our model achieves a substantial &#x2b;5.94% F1-Score improvement in the challenging minority Oligodendrocyte lineage (F1 &#x3d; 89.75%) compared to the baseline (83.81%), and maintains a clear margin over other advanced models such as MobileNet V2 (87.20%) and DenseNet (86.45%). This gain is particularly significant as oligodendrocytes are crucial for remyelination and the treatment of neurological diseases like Multiple Sclerosis, a minority class that is morphologically difficult to identify. Alongside this, the model showed strong performance in Astrocytes (F1 &#x3d; 98.34%) and Neurons (F1 &#x3d; 96.99%), exceeding the diagnostic precision of all benchmarked architectures. This differential success confirms that the synergistic integration of the attention module and the Gated CNN effectively extracts highly discriminative morphological features, ensuring high precision across all three biologically critical lineages while effectively overcoming the classification bias inherent in generic models.</p>
<p>In addition to benchmarking against general SOTA architectures (<xref ref-type="table" rid="T4">Table 4</xref>), we critically evaluated CG-RecNet against the established domain-specific method: the Xception-based model by <xref ref-type="bibr" rid="B46">Zhu et al. (2021)</xref>, which first validated the potential for label-free NSC prediction using the same core dataset. The original Xception-based model achieved an accuracy of 92.3% on comparable brightfield test data. CG-RecNet&#x2019;s overall accuracy of 96.40% represents a substantial performance margin over this foundational domain-specific benchmark. This significant improvement stems from CG-RecNet&#x2019;s architectural advancements, which directly address the core limitations of standard CNNs like Xception. While the Xception-based model relies on depthwise separable convolutions to focus on local features and lacks dedicated noise control, CG-RecNet introduces two complementary mechanisms: the LinAngular-XCA Fusion Module to capture long-range morphological dependencies and global context, and the Gated CNN Block to act as a dynamic, learnable filter for noise suppression and local texture refinement. This synergistic, hybrid design proves essential for extracting the fine-grained morphological features necessary for high-precision classification in complex brightfield environments, confirming that CG-RecNet provides a significant architectural and predictive advance over the established methodology.</p>
</sec>
</sec>
<sec id="s3-3">
<label>3.3</label>
<title>Qualitative interpretability analysis via grad-CAM</title>
<p>To explore the visual focus of CG-RecNet during the classification of neural stem cell differentiation, we conducted a qualitative examination using Gradient-weighted Class Activation Mapping (Grad-CAM). This method provides a visual approximation of the image regions that contribute most significantly to the model&#x2019;s categorical predictions.</p>
<p>As shown in <xref ref-type="fig" rid="F8">Figure 8</xref>, the heatmaps generated by CG-RecNet indicate that the model&#x2019;s attention is primarily concentrated on regions characterized by high pixel intensity variations and specific morphological textures within the brightfield imagery. For instance, in neuronal samples, the activation areas often align with the elongated structures and high-contrast boundaries of the cells.</p>
<fig id="F8" position="float">
<label>FIGURE 8</label>
<caption>
<p>Visualization of class activation mappings (Grad-CAM). Representative visualization results for Astrocytes (0), Oligodendrocytes (1), and Neurons (2). The heatmaps qualitatively illustrate the focus areas of the CG-RecNet model. Scale bar &#x3d; 10 &#xb5;m.</p>
</caption>
<graphic xlink:href="fcell-14-1767574-g008.tif">
<alt-text content-type="machine-generated">Three columns show visualizations of single cells with corresponding heatmaps and segmentation masks for astrocytes, oligodendrocytes, and neurons, labeled beneath each column, illustrating differences in cell morphology and computational classification.</alt-text>
</graphic>
</fig>
<p>It is important to emphasize that this analysis represents a qualitative method to visualize feature importance rather than a definitive biological validation of cellular structures. While the heatmaps suggest that the model prioritizes relevant morphological regions, these observations are based on a limited set of representative samples. The alignment between the model&#x2019;s attention and visual cellular features serves as a diagnostic aid for inspecting the decision-making rationale, demonstrating that the network captures salient phenotypic patterns to differentiate between Astrocyte, Oligodendrocyte, and Neuron lineages.</p>
<p>The figure presents representative visualization results for three distinct stem cell categories, sequentially labeled as (0), (1), and (2). The top row displays the original microscopic images, while the lower rows illustrate the corresponding attention heatmaps. These heatmaps visually delineate the focus areas of the CG-RecNet model, where high-activation regions (highlighted in red and yellow) visually align with the morphological structures of the stem cells. This indicates that the model effectively prioritizes relevant biological features over background noise during classification, thereby revealing the model&#x2019;s decision-making rationale and validating its interpretability in cellular analysis.</p>
<p>As discussed in the limitations, while Grad-CAM provides visual evidence of the model&#x2019;s focus, these findings remain qualitative. The heatmaps identify salient pixel intensity patterns rather than providing a quantitative biological correlation, a distinction that is crucial for interpreting the model&#x2019;s decision-making rationale.</p>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<label>4</label>
<title>Discussion</title>
<sec id="s4-1">
<label>4.1</label>
<title>Overview and rationale for the CG-RecNet framework</title>
<p>This study validates CG-RecNet, a specialized hybrid deep learning framework engineered for the accurate, non-invasive, and label-free multi-class classification of Neural Stem Cell (NSC) differentiation lineages. Precise monitoring of neurogenesis is paramount for translational regenerative medicine, yet the reliance on immunofluorescent staining remains a significant bottleneck, introducing cellular toxicity and precluding longitudinal analysis. CG-RecNet addresses this methodological challenge by extracting fine-grained, lineage-specific morphological features directly from ubiquitous brightfield microscopy images.</p>
<p>The framework&#x2019;s performance is rooted in its deliberate architecture, designed to overcome two primary technical difficulties inherent in label-free cellular imaging: the low contrast and high noise of the background, and the visual ambiguity in distinguishing closely related cell types. By integrating the hierarchical feature extraction of a ResNet50 backbone with two dedicated modules&#x2014;the LinAngular Cross-Channel Attention (LinAngular-XCA) Fusion Module for global context modeling and the Gated Convolutional Neural Network (GatedCNN) Block for local feature refinement&#x2014;CG-RecNet establishes a new benchmark for accuracy in this domain.</p>
</sec>
<sec id="s4-2">
<label>4.2</label>
<title>Comparative performance analysis and architectural superiority</title>
<p>The empirical results demonstrate the enhanced predictive capacity of CG-RecNet on the internal dataset, achieving an overall accuracy of 96.40%. As illustrated in <xref ref-type="table" rid="T3">Table 3</xref>, the 1.82% accuracy gain over the ResNet50 baseline requires a &#x223c;9.8% increase in parameter count. While VGG-16 is significantly larger, it belongs to an older architectural paradigm and may not serve as a direct benchmark for modern deployment. For applications where resource constraints are paramount, the baseline model may be preferable as it provides the &#x2018;biggest bang for the buck,&#x2019; yielding only a 1.8% lower accuracy for a more compact model size. The additional complexity of CG-RecNet is justified primarily in scenarios where high-precision identification of minority lineages, such as oligodendrocytes, is the critical requirement. This efficiency is critical for laboratory deployment, offering an optimal trade-off between SOTA-level precision and the computational accessibility required for standard workstations.</p>
<p>Specifically, CG-RecNet surpassed established CNNs like VGG and MobileNet V2, and showed competitive advantages over modern models such as Vision Transformer (ViT) and DenseNet. For instance, the ViT model yielded a comparatively lower accuracy of 94.27% &#xb1; 0.25%. This deficit can be attributed to the ViT&#x2019;s inherent challenge in capturing the high-frequency local texture and fine cellular process features crucial for cell typing, owing to its lack of explicit convolutional inductive bias. In contrast, CG-RecNet maintains the benefits of convolution while enhancing global perception.</p>
<p>Our model&#x2019;s marginal yet critical improvement over the optimized DenseNet (96.04% &#xb1; 0.14% accuracy) is attributable to the strategic functional separation within our hybrid design. DenseNet may not optimally filter out the non-informative background artifacts common in brightfield images. CG-RecNet&#x2019;s integrated Gated CNN Block acts as a selective noise filter, refining local feature maps to retain biologically relevant information, thereby increasing the signal-to-noise ratio of the input features.</p>
<p>Furthermore, CG-RecNet achieved a &#x2b;4.1% performance margin compared to the domain-specific benchmark using an Xception-based architecture (<xref ref-type="bibr" rid="B46">Zhu et al., 2021</xref>). This suggests that architectures relying primarily on local operations struggle to integrate the long-range morphological dependencies (e.g., the extent of neurite outgrowth) essential for high-fidelity classification. The introduction of the LinAngular-XCA Fusion Module specifically resolves this by efficiently capturing non-local, long-range cellular morphology features.</p>
</sec>
<sec id="s4-3">
<label>4.3</label>
<title>Robustness against class imbalance and biological significance</title>
<p>A key finding of this study is the high classification efficacy of CG-RecNet on imbalanced NSC datasets, particularly regarding minority lineages. Datasets derived from biological differentiation often exhibit natural skewness, particularly regarding terminally differentiated cell types. In our case, the oligodendrocyte lineage constitutes a critical but minority class. Conventional deep learning models frequently suffer from classification bias towards the majority classes.</p>
<p>Notably, CG-RecNet achieved a &#x2b;5.94% F1-Score improvement for the critical Oligodendrocyte lineage (F1-Score of 89.75%) compared to the Baseline model (83.81%). This result represents the primary contribution of our work. Biologically, oligodendrocytes are the &#x201c;minority class&#x201d; and are notoriously difficult to distinguish from background noise due to their subtle morphology compared to the abundant neurons. However, in the context of regenerative medicine for demyelinating diseases (e.g., Multiple Sclerosis), the ability to accurately identify these rare therapeutic cells is the critical bottleneck, far more valuable than marginal gains in detecting already abundant neurons. Standard models often sacrifice the accuracy of these minority classes to achieve a high &#x201c;average&#x201d; score. By selectively suppressing background artifacts through the Gated CNN, CG-RecNet effectively &#x201c;closes the gap&#x201d; on this hardest class, offering a diagnostic reliability that generic models may not consistently provide.</p>
</sec>
<sec id="s4-4">
<label>4.4</label>
<title>Interpretability and translational potential</title>
<p>Addressing the &#x201c;black box&#x201d; challenge in biological applications, CG-RecNet incorporates <italic>post hoc</italic> interpretability through Grad-CAM visualization. By generating activation maps, we provided a rationale for the model&#x2019;s decision-making process. As illustrated in <xref ref-type="fig" rid="F8">Figure 8</xref>, the model consistently focuses on biologically relevant morphological hallmarks&#x2014;such as the length of neurite extensions in neurons, or the textured appearance of the soma in glial cells&#x2014;that align closely with established histological criteria used by human experts.</p>
<p>This interpretability transforms CG-RecNet from a purely predictive tool into a transparent diagnostic aid. It facilitates human-AI collaboration for validating observed biological phenomena and accelerating phenotypic drug screening (<xref ref-type="bibr" rid="B1">Bekhite and Schulze, 2021</xref>). While molecular assays remain the gold standard, CG-RecNet serves as a scalable, cost-effective surrogate marker for automated, routine monitoring in culture.</p>
</sec>
<sec id="s4-5">
<label>4.5</label>
<title>Limitations and future directions</title>
<p>Despite the encouraging results, this study acknowledges methodological limitations.</p>
<p>First, the model was trained and validated on a single publicly available dataset derived from rat embryonic NSCs. While we employed a stratified cross-validation strategy to mitigate overfitting, the lack of an external, independent dataset (out-of-context validation) limits the assessment of the model&#x2019;s generalization capability. Although our architectural principles of global-local fusion are designed to handle high noise, cellular heterogeneity among human iPSC lines (<xref ref-type="bibr" rid="B22">Kilpinen et al., 2017</xref>) and variations in imaging protocols across different laboratories remain significant challenges for biological translation. Future work will focus on expanding the training cohort to include multi-institutional data to rigorously verify cross-platform generalization.</p>
<p>Second, the data utilized in this study were acquired via Imaging Flow Cytometry (IFC), which provides high-throughput, pre-segmented single-cell images. Consequently, the current framework focuses on phenotypic classification and does not address the challenges of cell segmentation or regional identification required for standard adherent cell microscopy. Adapting CG-RecNet for <italic>in situ</italic> microscopy applications would necessitate the integration of an additional upstream segmentation module.</p>
<p>Third, the current study utilizes fixed time-point brightfield images and does not yet incorporate the continuous temporal dynamics inherent to live-cell differentiation. Cell fate is a dynamic trajectory, and leveraging temporal information can enhance the prediction of differentiation outcomes at earlier stages. Therefore, a crucial direction involves integrating continuous video data into a spatiotemporal deep learning framework. This will allow CG-RecNet to analyze differentiation kinetics, contributing to the development of a holistic intelligent monitoring system for practical regenerative medicine research.</p>
<p>Fourth, while the Grad-CAM visualizations provide valuable insights into the model&#x2019;s decision-making process, this interpretability analysis remains primarily qualitative. Due to the high-throughput nature of the imaging flow cytometry dataset and the inherent complexity of defining pixel-level spatial ground truths for all morphological artifacts, a large-scale quantitative evaluation of heatmap accuracy was not performed in this study. Future research will aim to develop standardized quantitative metrics to further validate the precision of these attention-based focus areas in label-free biological contexts.</p>
</sec>
</sec>
<sec sec-type="conclusion" id="s5">
<label>5</label>
<title>Conclusion</title>
<p>This study successfully introduces and validates CG-RecNet, a reliable, hybrid deep learning architecture specifically designed for the accurate, label-free multi-class classification of neural stem cell differentiation lineages. By synergistically combining the hierarchical feature extraction capabilities of ResNet50 with the enhanced global context modeling of the LinAngular-XCA Fusion Module and the noise-suppression mechanisms of Gated CNN Blocks, the proposed model effectively addresses the inherent challenges of distinguishing subtle morphological phenotypes in complex brightfield microscopy images. Our extensive empirical evaluation on a diverse dataset confirms the reliable diagnostic performance of CG-RecNet, which achieved an overall accuracy of 96.40% and an average AUC of 0.9979. This performance margin surpasses that of several established baseline architectures, including advanced models like ViT and DenseNet. Furthermore, the strategic integration of Grad-CAM visualizations successfully mitigates the &#x201c;black-box&#x201d; nature of the deep learning model, providing transparent visual evidence of the specific cell morphological features driving the prediction. This visualization provides qualitative evidence of the morphological features driving the classification, suggesting the potential for enhanced diagnostic transparency in automated quality control systems. Future research will focus on extending the model&#x2019;s applicability by validating CG-RecNet on large-scale, multi-institutional human iPSC-derived datasets to ensure reliable generalization across diverse cell lines and imaging platforms. Additionally, we aim to explore the integration of temporal dynamic data, extending the model to analyze continuous video streams of cell growth. 
This next step is essential for enabling the prediction of differentiation trajectories at earlier stages, thereby contributing to the development of a holistic intelligent monitoring system crucial for the advancement of regenerative medicine manufacturing.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="sec" rid="s13">Supplementary Material</xref>, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="ethics-statement" id="s7">
<title>Ethics statement</title>
<p>The manuscript presents research on animals that do not require ethical approval for their study.</p>
</sec>
<sec sec-type="author-contributions" id="s8">
<title>Author contributions</title>
<p>QL: Conceptualization, Data curation, Formal Analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review and editing. FL: Conceptualization, Data curation, Formal Analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review and editing. JZ: Conceptualization, Data curation, Formal Analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review and editing. XZ: Conceptualization, Data curation, Formal Analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review and editing. CG: Conceptualization, Data curation, Formal Analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review and editing. JL: Conceptualization, Data curation, Formal Analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review and editing.</p>
</sec>
<sec sec-type="COI-statement" id="s10">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s11">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s12">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="s13">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fcell.2026.1767574/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fcell.2026.1767574/full&#x23;supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="DataSheet1.docx" id="SM1" mimetype="application/docx" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bekhite</surname>
<given-names>M. M.</given-names>
</name>
<name>
<surname>Schulze</surname>
<given-names>P. C.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Human induced pluripotent stem cell as a disease modeling and drug development Platform-A cardiac perspective</article-title>. <source>Cells</source> <volume>10</volume> (<issue>12</issue>), <fpage>3483</fpage>. <pub-id pub-id-type="doi">10.3390/cells10123483</pub-id>
<pub-id pub-id-type="pmid">34943991</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bradbury</surname>
<given-names>E. J.</given-names>
</name>
<name>
<surname>Burnside</surname>
<given-names>E. R.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Moving beyond the glial scar for spinal cord repair</article-title>. <source>Nat. Commun.</source> <volume>10</volume> (<issue>1</issue>), <fpage>1</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1038/s41467-019-11707-7</pub-id>
<pub-id pub-id-type="pmid">31462640</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Christiansen</surname>
<given-names>E. M.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>S. J.</given-names>
</name>
<name>
<surname>Ando</surname>
<given-names>D. M.</given-names>
</name>
<name>
<surname>Javaherian</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Skibinski</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Lipnick</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). <article-title>
<italic>In silico</italic> labeling: predicting fluorescent labels in unlabeled images</article-title>. <source>Cell</source> <volume>173</volume> (<issue>3</issue>), <fpage>792</fpage>&#x2013;<lpage>803</lpage>. <pub-id pub-id-type="doi">10.1016/j.cell.2018.03.040</pub-id>
<pub-id pub-id-type="pmid">29656897</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cieri</surname>
<given-names>M. B.</given-names>
</name>
<name>
<surname>Ramos</surname>
<given-names>A. J.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Astrocytes, reactive astrogliosis, and glial scar formation in traumatic brain injury</article-title>. <source>Neural Regen. Res.</source> <volume>20</volume> (<issue>4</issue>), <fpage>973</fpage>&#x2013;<lpage>989</lpage>. <pub-id pub-id-type="doi">10.4103/nrr.nrr-d-23-02091</pub-id>
<pub-id pub-id-type="pmid">38989932</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Dauphin</surname>
<given-names>Y. N.</given-names>
</name>
<name>
<surname>Fan</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Auli</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Grangier</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Language modeling with gated convolutional networks</article-title>,&#x201d; in <source>International conference on machine learning</source> (<publisher-name>Brookline, MA: PMLR</publisher-name>), <fpage>933</fpage>&#x2013;<lpage>941</lpage>.</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>De Gioia</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Biella</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Citterio</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Rizzo</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Abati</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Nizzardo</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Neural stem cell transplantation for neurodegenerative diseases</article-title>. <source>Int. J. Mol. Sci.</source> <volume>21</volume> (<issue>9</issue>), <fpage>3103</fpage>. <pub-id pub-id-type="doi">10.3390/ijms21093103</pub-id>
<pub-id pub-id-type="pmid">32354178</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dimou</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>G&#xf6;tz</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Glial cells as progenitors and stem cells: new roles in the healthy and diseased brain</article-title>. <source>Physiol. Rev.</source> <volume>94</volume> (<issue>3</issue>), <fpage>709</fpage>&#x2013;<lpage>737</lpage>. <pub-id pub-id-type="doi">10.1152/physrev.00036.2013</pub-id>
<pub-id pub-id-type="pmid">24987003</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Dosovitskiy</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Beyer</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Kolesnikov</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>An image is worth 16x16 words: transformers for image recognition at scale</article-title>,&#x201d; in <source>International conference on learning representations (ICLR)</source>.</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>El-Nouby</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Touvron</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Caron</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>XCiT: cross-covariance image transformers</article-title>. <source>Adv. Neural Inf. Process. Syst. (NeurIPS)</source>. <pub-id pub-id-type="doi">10.48550/arXiv.2106.09681</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Esteva</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Kuprel</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Novoa</surname>
<given-names>R. A.</given-names>
</name>
<name>
<surname>Ko</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Swetter</surname>
<given-names>S. M.</given-names>
</name>
<name>
<surname>Blau</surname>
<given-names>H. M.</given-names>
</name>
<etal/>
</person-group> (<year>2017</year>). <article-title>Dermatologist-level classification of skin cancer with deep neural networks</article-title>. <source>Nature</source> <volume>542</volume> (<issue>7639</issue>), <fpage>115</fpage>&#x2013;<lpage>118</lpage>. <pub-id pub-id-type="doi">10.1038/nature21056</pub-id>
<pub-id pub-id-type="pmid">28117445</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fawcett</surname>
<given-names>J. W.</given-names>
</name>
<name>
<surname>Asher</surname>
<given-names>R. A.</given-names>
</name>
</person-group> (<year>1999</year>). <article-title>The glial scar and central nervous system repair</article-title>. <source>Brain Res. Bull.</source> <volume>49</volume> (<issue>6</issue>), <fpage>377</fpage>&#x2013;<lpage>391</lpage>. <pub-id pub-id-type="doi">10.1016/s0361-9230(99)00072-6</pub-id>
<pub-id pub-id-type="pmid">10483914</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Feigin</surname>
<given-names>V. L.</given-names>
</name>
<name>
<surname>Nichols</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Alam</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Global, regional, and national burden of neurological disorders, 1990&#x2013;2016: a systematic analysis for the global burden of disease study 2016</article-title>. <source>Lancet Neurol.</source> <volume>18</volume> (<issue>5</issue>), <fpage>459</fpage>&#x2013;<lpage>480</lpage>. <pub-id pub-id-type="doi">10.1016/S1474-4422(18)30499-X</pub-id>
<pub-id pub-id-type="pmid">30879893</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Franklin</surname>
<given-names>R. J.</given-names>
</name>
<name>
<surname>Bodini</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Goldman</surname>
<given-names>S. A.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Remyelination in the central nervous system</article-title>. <source>Cold Spring Harb. Perspect. Biol.</source> <volume>16</volume> (<issue>3</issue>), <fpage>a041371</fpage>. <pub-id pub-id-type="doi">10.1101/cshperspect.a041371</pub-id>
<pub-id pub-id-type="pmid">38316552</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fujitani</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Huddin</surname>
<given-names>N. S.</given-names>
</name>
<name>
<surname>Kawai</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kanie</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Kiyota</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Shimizu</surname>
<given-names>K.</given-names>
</name>
<etal/>
</person-group> (<year>2017</year>). <article-title>Morphology-based non-invasive quantitative prediction of the differentiation status of neural stem cells</article-title>. <source>J. Biosci. Bioeng.</source> <volume>124</volume> (<issue>3</issue>), <fpage>351</fpage>&#x2013;<lpage>358</lpage>. <pub-id pub-id-type="doi">10.1016/j.jbiosc.2017.04.006</pub-id>
<pub-id pub-id-type="pmid">28465021</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gao</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Peng</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Lu</surname>
<given-names>X.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Progress in stem cell therapy for spinal cord injury</article-title>. <source>Stem Cells Int.</source> <volume>2020</volume>, <fpage>2853650</fpage>. <pub-id pub-id-type="doi">10.1155/2020/2853650</pub-id>
<pub-id pub-id-type="pmid">33204276</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hauser</surname>
<given-names>S. L.</given-names>
</name>
<name>
<surname>Oksenberg</surname>
<given-names>J. R.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>The neurobiology of multiple sclerosis: genes, inflammation, and neurodegeneration</article-title>. <source>Neuron</source> <volume>52</volume> (<issue>1</issue>), <fpage>61</fpage>&#x2013;<lpage>76</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuron.2006.09.011</pub-id>
<pub-id pub-id-type="pmid">17015227</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>He</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Ren</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Deep residual learning for image recognition</article-title>. <source>Proc. IEEE Conf. Comput. Vis. Pattern Recognit. (CVPR)</source>, <fpage>770</fpage>&#x2013;<lpage>778</lpage>. <pub-id pub-id-type="doi">10.1109/CVPR.2016.90</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Huang</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Van Der Maaten</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Weinberger</surname>
<given-names>K. Q.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Densely connected convolutional networks</article-title>,&#x201d; in <conf-name>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>, <conf-loc>Honolulu, HI, USA</conf-loc>, <conf-date>21-26 July 2017</conf-date> (<publisher-name>IEEE</publisher-name>), <fpage>4700</fpage>&#x2013;<lpage>4708</lpage>.</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jia</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Ma</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Qiao</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Ren</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Shi</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Application of convolutional neural networks in medical images: a bibliometric analysis</article-title>. <source>Quant. Imaging Med. Surg.</source> <volume>14</volume> (<issue>5</issue>), <fpage>3501</fpage>&#x2013;<lpage>3518</lpage>. <pub-id pub-id-type="doi">10.21037/qims-23-1600</pub-id>
<pub-id pub-id-type="pmid">38720828</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jucker</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Walker</surname>
<given-names>L. C.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Propagation and spread of pathogenic protein assemblies in neurodegenerative diseases</article-title>. <source>Nat. Neurosci.</source> <volume>21</volume> (<issue>10</issue>), <fpage>1341</fpage>&#x2013;<lpage>1349</lpage>. <pub-id pub-id-type="doi">10.1038/s41593-018-0238-6</pub-id>
<pub-id pub-id-type="pmid">30258241</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kalia</surname>
<given-names>L. V.</given-names>
</name>
<name>
<surname>Lang</surname>
<given-names>A. E.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Parkinson&#x27;s disease</article-title>. <source>Lancet</source> <volume>386</volume> (<issue>9996</issue>), <fpage>896</fpage>&#x2013;<lpage>912</lpage>. <pub-id pub-id-type="doi">10.1016/S0140-6736(14)61393-3</pub-id>
<pub-id pub-id-type="pmid">25904081</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kilpinen</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Goncalves</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Leha</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Afzal</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Alasoo</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Ashford</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2017</year>). <article-title>Common genetic variation drives molecular heterogeneity in human iPSCs</article-title>. <source>Nature</source> <volume>546</volume> (<issue>7658</issue>), <fpage>370</fpage>&#x2013;<lpage>375</lpage>. <pub-id pub-id-type="doi">10.1038/nature22403</pub-id>
<pub-id pub-id-type="pmid">28489815</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Krikid</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Rositi</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Vacavant</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>State-of-the-art deep learning methods for microscopic image segmentation: applications to cells, nuclei, and tissues</article-title>. <source>J. Imaging</source> <volume>10</volume> (<issue>12</issue>), <fpage>311</fpage>. <pub-id pub-id-type="doi">10.3390/jimaging10120311</pub-id>
<pub-id pub-id-type="pmid">39728208</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Linnerbauer</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Wheeler</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Quintana</surname>
<given-names>F. J.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Astrocyte crosstalk in CNS inflammation</article-title>. <source>Neuron</source> <volume>108</volume> (<issue>4</issue>), <fpage>608</fpage>&#x2013;<lpage>622</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuron.2020.08.012</pub-id>
<pub-id pub-id-type="pmid">32898475</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Litjens</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Kooi</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Bejnordi</surname>
<given-names>B. E.</given-names>
</name>
<name>
<surname>Setio</surname>
<given-names>A. A. A.</given-names>
</name>
<name>
<surname>Ciompi</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Ghafoorian</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2017</year>). <article-title>A survey on deep learning in medical image analysis</article-title>. <source>Med. Image Anal.</source> <volume>42</volume>, <fpage>60</fpage>&#x2013;<lpage>88</lpage>. <pub-id pub-id-type="doi">10.1016/j.media.2017.07.005</pub-id>
<pub-id pub-id-type="pmid">28778026</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Luo</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>The roles of neural stem cells in myelin regeneration and repair therapy after spinal cord injury</article-title>. <source>Stem Cell Res. Ther.</source> <volume>15</volume>, <fpage>204</fpage>. <pub-id pub-id-type="doi">10.1186/s13287-024-03825-x</pub-id>
<pub-id pub-id-type="pmid">38978125</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Mao</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2022</year>). &#x201c;<article-title>A ConvNet for the 2020s</article-title>,&#x201d; in <source>Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR)</source>, <fpage>11976</fpage>&#x2013;<lpage>11986</lpage>.</mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Long</surname>
<given-names>J. M.</given-names>
</name>
<name>
<surname>Holtzman</surname>
<given-names>D. M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Alzheimer disease: an update on pathobiology and treatment strategies</article-title>. <source>Cell</source> <volume>179</volume> (<issue>2</issue>), <fpage>312</fpage>&#x2013;<lpage>339</lpage>. <pub-id pub-id-type="doi">10.1016/j.cell.2019.09.001</pub-id>
<pub-id pub-id-type="pmid">31564456</pub-id>
</mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mao</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Deep learning in fluorescence imaging and analysis</article-title>. <source>J. Intelligent Med.</source> <volume>1</volume> (<issue>1</issue>), <fpage>42</fpage>&#x2013;<lpage>62</lpage>. <pub-id pub-id-type="doi">10.1002/jim4.17</pub-id>
</mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Martino</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Pluchino</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Bonfanti</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Schwartz</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Brain regeneration in physiology and pathology: the immune signature driving therapeutic plasticity of neural stem cells</article-title>. <source>Physiol. Rev.</source> <volume>91</volume> (<issue>4</issue>), <fpage>1281</fpage>&#x2013;<lpage>1304</lpage>. <pub-id pub-id-type="doi">10.1152/physrev.00032.2010</pub-id>
<pub-id pub-id-type="pmid">22013212</pub-id>
</mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Moen</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Bannon</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Kudo</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Graf</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Covert</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Van Valen</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Deep learning for cellular image analysis</article-title>. <source>Nat. Methods</source> <volume>16</volume> (<issue>12</issue>), <fpage>1233</fpage>&#x2013;<lpage>1246</lpage>. <pub-id pub-id-type="doi">10.1038/s41592-019-0403-1</pub-id>
<pub-id pub-id-type="pmid">31133758</pub-id>
</mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Sandler</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Howard</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>MobileNetV2: inverted residuals and linear bottlenecks</article-title>,&#x201d; in <conf-name>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</conf-name>, <conf-loc>Salt Lake City, UT, USA</conf-loc>, <conf-date>18-23 June 2018</conf-date> (<publisher-name>IEEE</publisher-name>), <fpage>4510</fpage>&#x2013;<lpage>4520</lpage>.</mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shorten</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Khoshgoftaar</surname>
<given-names>T. M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>A survey on image data augmentation for deep learning</article-title>. <source>J. Big Data</source> <volume>6</volume> (<issue>1</issue>), <fpage>1</fpage>&#x2013;<lpage>48</lpage>. <pub-id pub-id-type="doi">10.1186/s40537-019-0197-0</pub-id>
</mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Simonyan</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Zisserman</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2015</year>). &#x201c;<article-title>Very deep convolutional networks for large-scale image recognition</article-title>,&#x201d; in <source>International conference on learning representations (ICLR)</source>.</mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Siracusa</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Fusco</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Cuzzocrea</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Astrocytes: role and functions in brain pathologies</article-title>. <source>Front. Pharmacol.</source> <volume>10</volume>, <fpage>1114</fpage>. <pub-id pub-id-type="doi">10.3389/fphar.2019.01114</pub-id>
<pub-id pub-id-type="pmid">31611796</pub-id>
</mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sofroniew</surname>
<given-names>M. V.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Molecular dissection of reactive astrogliosis and glial scar formation</article-title>. <source>Trends Neurosci.</source> <volume>32</volume> (<issue>12</issue>), <fpage>638</fpage>&#x2013;<lpage>647</lpage>. <pub-id pub-id-type="doi">10.1016/j.tins.2009.08.002</pub-id>
<pub-id pub-id-type="pmid">19782411</pub-id>
</mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Varma</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Simon</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>Bias in error estimation when using cross-validation for model selection</article-title>. <source>BMC Bioinformatics</source> <volume>7</volume> (<issue>1</issue>), <fpage>91</fpage>. <pub-id pub-id-type="doi">10.1186/1471-2105-7-91</pub-id>
<pub-id pub-id-type="pmid">16504092</pub-id>
</mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Vaswani</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Shazeer</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Parmar</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Attention is all you need</article-title>. <source>Adv. Neural Inf. Process. Syst.</source> <volume>30</volume>. <pub-id pub-id-type="doi">10.48550/arXiv.1706.03762</pub-id>
</mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Warnock</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Toomey</surname>
<given-names>L. M.</given-names>
</name>
<name>
<surname>Wright</surname>
<given-names>A. J.</given-names>
</name>
<name>
<surname>Fisher</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Won</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Anyaegbu</surname>
<given-names>C.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Damage mechanisms to oligodendrocytes and white matter in central nervous system injury: the Australian context</article-title>. <source>J. Neurotrauma</source> <volume>37</volume> (<issue>5</issue>), <fpage>739</fpage>&#x2013;<lpage>769</lpage>. <pub-id pub-id-type="doi">10.1089/neu.2019.6890</pub-id>
<pub-id pub-id-type="pmid">32027208</pub-id>
</mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Woo</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Park</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>J. Y.</given-names>
</name>
<name>
<surname>Kweon</surname>
<given-names>I. S.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>CBAM: convolutional block attention module</article-title>,&#x201d; in <source>Proceedings of the European conference on computer vision</source> (<publisher-name>Cham, Switzerland: Springer</publisher-name>), <fpage>3</fpage>&#x2013;<lpage>19</lpage>.</mixed-citation>
</ref>
<ref id="B41">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xue</surname>
<given-names>C. R.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>M. Z.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Song</surname>
<given-names>Y. Y.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>H. J.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Tracking neural stem cells <italic>in vivo</italic>: achievements and limitations</article-title>. <source>Stem Cell Rev. Rep.</source> <volume>18</volume> (<issue>5</issue>), <fpage>1774</fpage>&#x2013;<lpage>1788</lpage>. <pub-id pub-id-type="doi">10.1007/s12015-022-10333-z</pub-id>
<pub-id pub-id-type="pmid">35122628</pub-id>
</mixed-citation>
</ref>
<ref id="B42">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Lv</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Deng</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>A wavelet-guided transformer approach for autofocus in brightfield biological microscopy</article-title>. <source>Sci. Rep.</source> <volume>15</volume> (<issue>1</issue>), <fpage>25521</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-025-11037-3</pub-id>
<pub-id pub-id-type="pmid">40665153</pub-id>
</mixed-citation>
</ref>
<ref id="B43">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhou</surname>
<given-names>S. K.</given-names>
</name>
<name>
<surname>Greenspan</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Davatzikos</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Duncan</surname>
<given-names>J. S.</given-names>
</name>
<name>
<surname>Van Ginneken</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Madabhushi</surname>
<given-names>A.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>A review of deep learning in medical imaging: imaging traits, technology trends, case studies with progress highlights, and future promises</article-title>. <source>Proc. IEEE</source> <volume>109</volume> (<issue>5</issue>), <fpage>820</fpage>&#x2013;<lpage>838</lpage>. <pub-id pub-id-type="doi">10.1109/jproc.2021.3054390</pub-id>
<pub-id pub-id-type="pmid">37786449</pub-id>
</mixed-citation>
</ref>
<ref id="B44">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhou</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Luo</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Zhuang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Weng</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Gong</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Lin</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2023a</year>). <article-title>Attention multi-hop graph and multi-scale convolutional fusion network for hyperspectral image classification</article-title>. <source>IEEE Trans. Geoscience Remote Sens.</source> <volume>61</volume>, <fpage>1</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1109/tgrs.2023.3265879</pub-id>
</mixed-citation>
</ref>
<!-- NOTE(review): B45 appears to duplicate B44 (identical author list, near-identical title, same DOI 10.1109/TGRS.2023.3265879 differing only in case); confirm whether separate 2023a/2023b entries are intended before publication. Not removed here because in-text xrefs may target B45. -->
<ref id="B45">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhou</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Luo</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Zhuang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Weng</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Gong</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Lin</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2023b</year>). <article-title>Attention multihop graph and multiscale convolutional fusion network for hyperspectral image classification</article-title>. <source>IEEE Trans. Geoscience Remote Sens.</source> <volume>61</volume>, <fpage>1</fpage>&#x2013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1109/TGRS.2023.3265879</pub-id>
</mixed-citation>
</ref>
<ref id="B46">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Song</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Cheng</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Deep learning-based predictive identification of neural stem cell differentiation</article-title>. <source>Nat. Commun.</source> <volume>12</volume> (<issue>1</issue>), <fpage>2614</fpage>. <pub-id pub-id-type="doi">10.1038/s41467-021-22758-0</pub-id>
<pub-id pub-id-type="pmid">33972525</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/436391/overview">Gianluca Carnevale</ext-link>, University of Modena and Reggio Emilia, Italy</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1423906/overview">Goran Sedmak</ext-link>, University of Zagreb, Croatia</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1835833/overview">William Buchser</ext-link>, Washington University in St. Louis, United States</p>
</fn>
</fn-group>
</back>
</article>