<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neurosci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neurosci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1662-453X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnins.2026.1743039</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Attention-enhanced segmentation network for automated cerebral microbleed detection and burden assessment</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Cho</surname>
<given-names>Kwon Hwi</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3220026"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Jeon</surname>
<given-names>Jonghyun</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3387174"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kim</surname>
<given-names>Seonggyu</given-names>
</name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1052329"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kim</surname>
<given-names>Young Seo</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2934438"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kim</surname>
<given-names>Yu-Mi</given-names>
</name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1919074"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kim</surname>
<given-names>Mi Kyung</given-names>
</name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1752370"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Shin</surname>
<given-names>Min-Ho</given-names>
</name>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Chung</surname>
<given-names>Insung</given-names>
</name>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3387020"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Koh</surname>
<given-names>Sang Baek</given-names>
</name>
<xref ref-type="aff" rid="aff7"><sup>7</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kim</surname>
<given-names>Hyeon Chang</given-names>
</name>
<xref ref-type="aff" rid="aff8"><sup>8</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Park</surname>
<given-names>Chae Jung</given-names>
</name>
<xref ref-type="aff" rid="aff9"><sup>9</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2358048"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Lee</surname>
<given-names>Jong-Min</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff10"><sup>10</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/193559"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Department of Artificial Intelligence, Hanyang University</institution>, <city>Seoul</city>, <country country="KR">Republic of Korea</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Neurology, Hanyang University College of Medicine</institution>, <city>Seoul</city>, <country country="KR">Republic of Korea</country></aff>
<aff id="aff3"><label>3</label><institution>Department of Electronic Engineering, Hanyang University</institution>, <city>Seoul</city>, <country country="KR">Republic of Korea</country></aff>
<aff id="aff4"><label>4</label><institution>Department of Preventive Medicine, Hanyang University College of Medicine</institution>, <city>Seoul</city>, <country country="KR">Republic of Korea</country></aff>
<aff id="aff5"><label>5</label><institution>Department of Preventive Medicine, Chonnam National University Medical School</institution>, <city>Gwangju</city>, <country country="KR">Republic of Korea</country></aff>
<aff id="aff6"><label>6</label><institution>Department of Occupational and Environmental Medicine, Keimyung University School of Medicine</institution>, <city>Daegu</city>, <country country="KR">Republic of Korea</country></aff>
<aff id="aff7"><label>7</label><institution>Department of Preventive Medicine and Institute of Occupational Medicine, Yonsei Wonju College of Medicine</institution>, <city>Wonju</city>, <country country="KR">Republic of Korea</country></aff>
<aff id="aff8"><label>8</label><institution>Department of Preventive Medicine, Yonsei University College of Medicine</institution>, <city>Seoul</city>, <country country="KR">Republic of Korea</country></aff>
<aff id="aff9"><label>9</label><institution>Department of Radiology, Research Institute of Radiological Science, Yongin Severance Hospital, Yonsei University Health System</institution>, <city>Yongin</city>, <country country="KR">Republic of Korea</country></aff>
<aff id="aff10"><label>10</label><institution>Department of Biomedical Engineering, Hanyang University</institution>, <city>Seoul</city>, <country country="KR">Republic of Korea</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Jong-Min Lee, <email xlink:href="mailto:ljm@hanyang.ac.kr">ljm@hanyang.ac.kr</email>; Chae Jung Park, <email xlink:href="mailto:CHELSEAPRK@yuhs.ac.kr">CHELSEAPRK@yuhs.ac.kr</email></corresp>
<fn id="fn0001" fn-type="equal">
<label>&#x2020;</label>
<p>These authors have contributed equally to this work and share first authorship</p>
</fn>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-03-04">
<day>04</day>
<month>03</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>20</volume>
<elocation-id>1743039</elocation-id>
<history>
<date date-type="received">
<day>10</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>30</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>09</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Cho, Jeon, Kim, Kim, Kim, Kim, Shin, Chung, Koh, Kim, Park and Lee.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Cho, Jeon, Kim, Kim, Kim, Kim, Shin, Chung, Koh, Kim, Park and Lee</copyright-holder>
<license>
<ali:license_ref start_date="2026-03-04">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec id="sec1001">
<title>Introduction</title>
<p>Cerebral microbleeds (CMBs) are small hemorrhagic lesions visible as hypointense foci on susceptibility-sensitive MRI and are established biomarkers of stroke risk and amyloid-related imaging abnormalities (ARIA-H) in patients receiving anti-amyloid therapy. However, automated detection remains challenging because true CMBs closely resemble veins, calcifications, and susceptibility artifacts. This visual ambiguity results in a persistent precision&#x2013;recall trade-off, where models optimized for high sensitivity tend to generate excessive false positives, while precision-focused models risk missing clinically relevant lesions. To address this limitation, we propose an attention-enhanced segmentation framework designed to suppress confounding activations while preserving lesion sensitivity.</p>
</sec>
<sec id="sec1002">
<title>Methods</title>
<p>We developed RLK-UNet with Convolutional Block Attention Modules (CBAM), a single-stage encoder&#x2013;decoder architecture that redefines skip connections as context-filtered pathways. The encoder incorporates large 13&#x00D7;13 residual local kernel (RLK) convolutions to capture broad contextual information for distinguishing spherical microbleeds from elongated vascular structures. CBAM modules are embedded in all skip connections to selectively enhance lesion-relevant features and suppress irrelevant background responses before feature fusion. The model was trained and evaluated on a multi-site dataset of 506 T2&#x002A;-GRE and SWI scans, with lesion-level detection assessed using precision, recall, F1-score, and average false positives per scan. Subject-level burden estimation was further evaluated using ARIA-H severity intervals.</p>
</sec>
<sec id="sec1003">
<title>Results</title>
<p>The proposed model achieved state-of-the-art lesion-level performance, with a precision of 0.891, recall of 0.887, F1-score of 0.887, and a markedly reduced false positive rate of 0.83 per subject. Five-fold cross-validation demonstrated stable performance with minimal variance across splits. In lesions &#x2264;3 mm, the model maintained strong detection performance (F1-score 0.869) while effectively controlling false positives. Cross-modality evaluation between T2&#x002A;-GRE and SWI confirmed robust generalization. Ablation studies verified that CBAM significantly improved precision while preserving sensitivity, and Grad-CAM visualizations demonstrated more spatially focused and clinically interpretable attention patterns. Subject-level CMB counts strongly correlated with ground truth (Spearman <italic>&#x03C1;</italic> = 0.93), and severity classification aligned with ARIA-H intervals.</p>
</sec>
<sec id="sec1004">
<title>Conclusion</title>
<p>RLK-UNet with CBAM provides a robust and interpretable solution for automated CMB detection by directly addressing false-positive propagation through attention-guided skip connections. The framework achieves balanced precision and sensitivity within a single-stage architecture and demonstrates reliable subject-level burden estimation aligned with clinically meaningful ARIA-H categories. These findings support its potential application in vascular risk stratification and treatment monitoring in patients undergoing anti-amyloid therapy.</p>
</sec>
</abstract>
<kwd-group>
<kwd>ARIA-H</kwd>
<kwd>attention mechanism</kwd>
<kwd>CBAM</kwd>
<kwd>cerebral microbleeds</kwd>
<kwd>segmentation</kwd>
</kwd-group>
<funding-group>
<award-group id="gs1">
<funding-source id="sp1">
<institution-wrap>
<institution>Ministry of Food and Drug Safety</institution>
<institution-id institution-id-type="doi" vocab="open-funder-registry" vocab-identifier="10.13039/open_funder_registry">10.13039/501100003569</institution-id>
</institution-wrap>
</funding-source>
</award-group>
<award-group id="gs2">
<funding-source id="sp2">
<institution-wrap>
<institution>Ministry of Health &#x0026; Welfare</institution>
</institution-wrap>
</funding-source>
</award-group>
<award-group id="gs3">
<funding-source id="sp3">
<institution-wrap>
<institution>Ministry of Trade, Industry and Energy</institution>
<institution-id institution-id-type="doi" vocab="open-funder-registry" vocab-identifier="10.13039/open_funder_registry">10.13039/501100003052</institution-id>
</institution-wrap>
</funding-source>
</award-group>
<award-group id="gs4">
<funding-source id="sp4">
<institution-wrap>
<institution>Ministry of Science and ICT</institution>
</institution-wrap>
</funding-source>
</award-group>
<award-group id="gs5">
<funding-source id="sp5">
<institution-wrap>
<institution>Hanyang University</institution>
<institution-id institution-id-type="doi" vocab="open-funder-registry" vocab-identifier="10.13039/open_funder_registry">10.13039/501100002380</institution-id>
</institution-wrap>
</funding-source>
</award-group>
<award-group id="gs6">
<funding-source id="sp6">
<institution-wrap>
<institution>Korea government (MSIT)</institution>
</institution-wrap>
</funding-source>
</award-group>
<award-group id="gs7">
<funding-source id="sp7">
<institution-wrap>
<institution>National IT Industry Promotion Agency</institution>
<institution-id institution-id-type="doi" vocab="open-funder-registry" vocab-identifier="10.13039/open_funder_registry">10.13039/501100003665</institution-id>
</institution-wrap>
</funding-source>
</award-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by the National IT Industry Promotion Agency (NIPA), an agency under the MSIT and with the support of the Daegu Digital Innovation Promotion Agency (DIP), the organization under the Daegu Metropolitan Government. This work was supported by Institute of Information &#x0026; communications Technology Planning &#x0026; Evaluation (IITP) grant funded by the Korea government (MSIT) [No. RS-2020-II201373, Artificial Intelligence Graduate School Program (Hanyang University)]. This work was supported by the Korea Medical Device Development Fund grant funded by the Korea government (the Ministry of Science and ICT, the Ministry of Trade, Industry and Energy, the Ministry of Health &#x0026; Welfare, the Ministry of Food and Drug Safety; Project Numbers: 2710077831 and RS-2023-00247272).</funding-statement>
</funding-group>
<counts>
<fig-count count="6"/>
<table-count count="8"/>
<equation-count count="5"/>
<ref-count count="24"/>
<page-count count="13"/>
<word-count count="9377"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Brain Imaging Methods</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>Cerebral microbleeds (CMBs) are small (&#x003C;10&#x202F;mm) perivascular hemosiderin deposits that appear as punctate hypointensities on susceptibility-sensitive magnetic resonance imaging (MRI), most commonly associated with hypertensive arteriopathy and cerebral amyloid angiopathy (<xref ref-type="bibr" rid="ref10">Greenberg et al., 2009</xref>; <xref ref-type="bibr" rid="ref5">Cordonnier and van der Flier, 2011</xref>; <xref ref-type="bibr" rid="ref8">Fazekas et al., 1999</xref>). Their presence has been linked to an increased risk of stroke, cognitive decline, and dementia, even in otherwise asymptomatic individuals (<xref ref-type="bibr" rid="ref1">Akoudad et al., 2016</xref>; <xref ref-type="bibr" rid="ref11">Gregoire et al., 2009</xref>). Accurate detection of CMBs is therefore clinically critical, not only for assessing intracerebral hemorrhage risk in patients considered for anticoagulation therapy, but also for monitoring amyloid-related imaging abnormalities (ARIA-H), a frequent adverse event in anti-amyloid immunotherapy (<xref ref-type="bibr" rid="ref16">Lovelock et al., 2010</xref>; <xref ref-type="bibr" rid="ref12">Hampel et al., 2023</xref>). Consequently, the reliable and interpretable detection of CMBs is becoming increasingly important for vascular risk management and dementia care.</p>
<p>Early efforts to detect CMBs primarily relied on manual visual rating scales, such as the Microbleed Anatomical Rating Scale (MARS) and the Brain Observer MicroBleed Scale (BOMBS), in which radiologists assessed hypointense lesions on T2&#x002A;-weighted GRE or SWI images (<xref ref-type="bibr" rid="ref11">Gregoire et al., 2009</xref>; <xref ref-type="bibr" rid="ref4">Cordonnier et al., 2009</xref>). While these methods were clinically trusted, they were limited by inter-rater variability, inefficiency, and low sensitivity to subtle or small lesions. The higher sensitivity of SWI further increased the number of visible CMBs, exacerbating the manual burden and motivating the development of automated approaches.</p>
<p>Traditional machine learning (ML) methods represented the first attempts at automation, leveraging handcrafted features related to intensity, shape, and anatomical location. For example, Barnes et al. and Kuijf et al. combined candidate detection with Random Forest classifiers, while Ghafaryasl et al. used a support vector machine trained on shape and contextual features (<xref ref-type="bibr" rid="ref3">Barnes et al., 2011</xref>; <xref ref-type="bibr" rid="ref14">Kuijf et al., 2013</xref>; <xref ref-type="bibr" rid="ref9">Ghafaryasl et al., 2012</xref>). These approaches demonstrated the feasibility of automated detection but were hampered by poor generalizability across different imaging conditions and susceptibility to false positives in complex anatomical regions. To overcome these limitations, deep learning&#x2013;based models were introduced, particularly 3D convolutional neural networks (CNNs) that could exploit richer spatial context. Dou et al. pioneered a 3D patch-based CNN that improved sensitivity and better distinguished CMBs from confounding structures such as vessels and calcifications (<xref ref-type="bibr" rid="ref6">Dou et al., 2016</xref>).</p>
<p>More recently, detection-based deep learning frameworks such as YOLO and SSD have been applied to CMB analysis. Al-Masni et al. proposed a two-stage pipeline combining YOLOv2 with a CNN-based classifier (<xref ref-type="bibr" rid="ref2">Al-Masni et al., 2020</xref>), while Myung et al. enhanced YOLOv2 with cerebrospinal fluid (CSF) filtering as a post-processing step to reduce false positives (<xref ref-type="bibr" rid="ref17">Myung et al., 2021</xref>). Li et al. further developed an SSD-based model that leveraged ground truth annotations during training to enhance feature learning (<xref ref-type="bibr" rid="ref15">Li et al., 2021</xref>). Although these detection-based approaches demonstrated efficiency, their reliance on bounding boxes limited their ability to capture the precise morphology of CMBs, which are typically small and spherical.</p>
<p>Segmentation-based approaches, particularly those built on U-Net and its variants, have therefore become the preferred choice for automated CMB detection. U-Net&#x2019;s encoder&#x2013;decoder architecture enables multi-scale feature integration and pixel-level localization, which is crucial for delineating small, low-contrast lesions. Fan et al. demonstrated the effectiveness of multi-slice U-Net inputs for improving detection accuracy (<xref ref-type="bibr" rid="ref7">Fan et al., 2022</xref>), while Tsuchida et al. proposed SHIVA-CMB, a 3D U-Net trained on seven cohorts with diverse acquisition protocols, showing strong generalization across sites (<xref ref-type="bibr" rid="ref21">Tsuchida et al., 2024</xref>). Wei et al. introduced MMOC-Net, a two-stage architecture combining U-Net and a Full-Resolution Network (FRN), highlighting the importance of multi-scale fusion and hybrid loss functions for capturing small-object sensitivity (<xref ref-type="bibr" rid="ref23">Wei et al., 2022</xref>). Collectively, these studies confirm the value of segmentation-based methods, yet also reveal persistent trade-offs between sensitivity and precision. Models tuned for high recall often produce large numbers of false positives, whereas those optimized for precision risk missing clinically significant lesions.</p>
<p>A key limitation of conventional U-Net&#x2013;based frameworks lies in their skip connections, which indiscriminately transmit both lesion-relevant and irrelevant high-frequency features from the encoder to the decoder. This results in vascular textures, calcifications, and noise being propagated alongside true CMB signals, inflating false positives and undermining precision. To address this challenge, we reconceptualize skip connections as selective, context-filtered pathways rather than raw conduits. Specifically, our proposed framework integrates residual local kernel (RLK) convolutions with large receptive fields to capture broad contextual cues, together with Convolutional Block Attention Modules (CBAM) embedded in all skip connections. The RLK encoder provides contextual information to disambiguate spherical CMBs from elongated vascular structures, while CBAM selectively filters features before fusion into the decoder (<xref ref-type="bibr" rid="ref19">Son et al., 2023</xref>; <xref ref-type="bibr" rid="ref24">Woo et al., 2018</xref>). This design suppresses the propagation of confounding responses and amplifies lesion-relevant signals, alleviating the precision&#x2013;recall trade-off at its source.</p>
<p>Importantly, CBAM generates attention maps that provide transparent visual evidence of what the model emphasizes or suppresses, enhancing clinical interpretability and trust. Unlike prior two-stage or post-processing frameworks, our method achieves balanced improvements in sensitivity and precision within a single end-to-end architecture. Furthermore, we show that the model&#x2019;s subject-level predictions align with ARIA-H severity intervals, underscoring its translational relevance for monitoring treatment in patients receiving anti-amyloid therapies.</p>
<p>In summary, this study makes three key contributions. First, we redefine skip connections as context-aware, attention-gated pathways that directly address the root cause of false-positive propagation in CMB detection. Second, we propose a single-stage architecture that integrates large-kernel RLK encoding with CBAM-based skip filtering, improving precision without compromising sensitivity. Finally, we demonstrate the clinical applicability of our framework through accurate subject-level burden estimation aligned with ARIA-H severity categories, highlighting its potential for real-world deployment in dementia care and treatment monitoring.</p>
</sec>
<sec sec-type="methods" id="sec2">
<label>2</label>
<title>Methods</title>
<sec id="sec3">
<label>2.1</label>
<title>Data preprocessing</title>
<p>All input magnetic resonance imaging data underwent a standardized preprocessing pipeline to ensure consistency and improve model performance. First, N4ITK bias field correction was applied to reduce low-frequency intensity inhomogeneity, thereby enhancing tissue contrast and supporting more stable feature learning (<xref ref-type="bibr" rid="ref22">Tustison et al., 2010</xref>). Subsequently, non-brain tissues such as the skull and surrounding fat were removed using the Brain Extraction Tool (BET) from FSL (<xref ref-type="bibr" rid="ref13">Jenkinson et al., 2012</xref>). To optimize memory usage and computational efficiency, each image was tightly cropped around the brain region, and all slices were resampled to a fixed resolution of 512&#x202F;&#x00D7;&#x202F;512 pixels.</p>
<p>During training, various data augmentation techniques were applied to improve generalization and mitigate overfitting. Specifically, random geometric transformations including &#x00B1;15&#x00B0; rotations, horizontal flipping, and vertical flipping were used. To further focus the training on relevant anatomical areas, slices were cropped around non-zero mask regions, ensuring that lesion-containing areas were included in the input. Additionally, bezier curve-based nonlinear intensity distortion was introduced to simulate contrast variability across subjects and scanners, thus improving the model&#x2019;s robustness to imaging heterogeneity.</p>
<p>All images were standardized using z-score normalization to have zero mean and unit variance. The data were processed on a 2D slice-wise basis. For model input, each sample was constructed as a three-channel input by stacking the center slice with its immediate preceding and succeeding slices.</p>
<p>This input configuration is anatomically motivated, as cerebral microbleeds are small, focal lesions that typically extend across only a limited number of contiguous axial slices. Incorporating adjacent slices allows the model to capture local inter-slice continuity relevant to microbleed detection while remaining within a 2D-based framework. In contrast, using all slices along the z-dimension as input channels would introduce substantial variability in input dimensionality due to differences in slice thickness and acquisition protocols, potentially impairing generalization in multi-center datasets. The specific input configuration and how it integrates with the network are described in the following section.</p>
</sec>
<sec id="sec4">
<label>2.2</label>
<title>Network architecture</title>
<p>As illustrated in <xref ref-type="fig" rid="fig1">Figure 1</xref>, the proposed model adopts a modified encoder&#x2013;decoder architecture based on U-Net, designed specifically for the detection of cerebral microbleeds (CMBs). Although CMBs are three-dimensional anatomical entities, they typically appear as small, focal lesions spanning only a limited number of slices along the axial direction. Based on this observation, the network is built upon a 2D-based framework that effectively captures in-plane spatial features while maintaining robustness across heterogeneous imaging protocols.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Overall architecture of the proposed RLK-UNet with CBAM for cerebral microbleed (CMB) segmentation. <bold>(a)</bold> The network adopts an encoder&#x2013;decoder structure, where each encoder and decoder block incorporates residual local kernel (RLK) modules utilizing 13&#x202F;&#x00D7;&#x202F;13 convolutional filters to capture multi-scale context. CBAM, consisting of channel and spatial attention, is applied to all skip connections to enhance CMB-relevant features while suppressing irrelevant background activations. Attention gates are used to further refine the fused features passed from encoder to decoder. Multi-scale outputs are generated at three decoder stages to provide deep supervision and foreground highlighting. <bold>(b)</bold> The internal structure of CBAM is illustrated, showing the sequential application of channel attention (based on global average and max pooling followed by MLP and element-wise addition) and spatial attention (using convolution after concatenating pooled spatial maps). The model receives a three-channel input consisting of the target axial slice and its adjacent upper and lower slices, and outputs pixel-wise segmentation maps along with intermediate auxiliary outputs to improve learning effectiveness and interpretability.</p>
</caption>
<graphic xlink:href="fnins-20-1743039-g001.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Neural network architecture diagram divided into two parts: panel a shows a multi-stage encoder-decoder pipeline for processing brain MRI images, incorporating input, convolution, and output blocks with defined upsampling, downsampling, skip connections, and CBAM modules, alongside multi-scale foreground highlighting; panel b presents a flowchart of the CBAM attention module, detailing channel and spatial attention branches using average pooling, max pooling, multilayer perceptrons, convolution, sigmoid activations, and element-wise addition or concatenation for generating output features.</alt-text>
</graphic>
</fig>
<p>The network integrates Residual Local Kernel (RLK) blocks and Convolutional Block Attention Modules (CBAM) to enhance discriminative feature learning and suppress irrelevant activations. The model is optimized for the segmentation of small and sparse CMBs, which are often challenging to distinguish due to their similarity to surrounding anatomical structures.</p>
<sec id="sec5">
<label>2.2.1</label>
<title>Encoder: deep feature extraction with large receptive fields</title>
<p>The encoder consists of four stages, each composed of two or more convolutional blocks. Unlike conventional U-Net architectures that typically use 3&#x202F;&#x00D7;&#x202F;3 or 5&#x202F;&#x00D7;&#x202F;5 kernels, our RLK blocks utilize large 13&#x202F;&#x00D7;&#x202F;13 kernels. These wide convolutional filters allow the network to capture a broader receptive field, enabling the extraction of more extensive contextual information around potential lesions (<xref ref-type="bibr" rid="ref19">Son et al., 2023</xref>). Given that typical cerebral microbleeds have a diameter of approximately 2&#x2013;10&#x202F;mm, corresponding to roughly 5&#x2013;20 voxels at the 512&#x202F;&#x00D7;&#x202F;512 in-plane resolution used in this study, the 13&#x202F;&#x00D7;&#x202F;13 kernel was selected to encompass the average spatial extent of microbleeds and their surrounding context, facilitating discrimination from linear vascular structures. This is particularly beneficial for distinguishing true CMBs from confounding structures such as veins and calcifications. Each block employs group normalization and GELU activation, and downsampling between stages is performed using strided convolutions. To improve generalization, stochastic depth via droppath is applied with increasing drop rates across encoder levels.</p>
</sec>
<sec id="sec6">
<label>2.2.2</label>
<title>Decoder and attention-based skip connections</title>
<p>The decoder mirrors the encoder structure and consists of upsampling modules followed by convolutional refinement blocks. Each upsampling stage restores spatial resolution and fuses the corresponding encoder features via skip connections. To prevent the transfer of irrelevant features through the skip paths, we incorporate CBAM into every skip connection. CBAM refines the encoder features through two sequential modules: channel attention and spatial attention (<xref ref-type="bibr" rid="ref24">Woo et al., 2018</xref>).</p>
<p>The channel attention map is computed as follows for an input feature map <inline-formula>
<mml:math id="M1">
<mml:mi>F</mml:mi>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mi>&#x211D;</mml:mi>
<mml:mrow>
<mml:mi>C</mml:mi>
<mml:mo>&#x00D7;</mml:mo>
<mml:mi>H</mml:mi>
<mml:mo>&#x00D7;</mml:mo>
<mml:mi>W</mml:mi>
</mml:mrow>
</mml:msup>
</mml:math>
</inline-formula>:</p>
<disp-formula id="E1">
<mml:math id="M2">
<mml:msub>
<mml:mi>M</mml:mi>
<mml:mi>c</mml:mi>
</mml:msub>
<mml:mo stretchy="true">(</mml:mo>
<mml:mi>F</mml:mi>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo>=</mml:mo>
<mml:mi>&#x03C3;</mml:mi>
<mml:mo stretchy="true">(</mml:mo>
<mml:mi mathvariant="italic">MLP</mml:mi>
<mml:mo stretchy="true">(</mml:mo>
<mml:mtext mathvariant="italic">Avgpool</mml:mtext>
<mml:mo stretchy="true">(</mml:mo>
<mml:mi>F</mml:mi>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo>+</mml:mo>
<mml:mi mathvariant="italic">MLP</mml:mi>
<mml:mo stretchy="true">(</mml:mo>
<mml:mtext mathvariant="italic">Maxpool</mml:mtext>
<mml:mo stretchy="true">(</mml:mo>
<mml:mi>F</mml:mi>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo stretchy="true">)</mml:mo>
</mml:math>
</disp-formula>
<p>where <inline-formula>
<mml:math id="M3">
<mml:mi>&#x03C3;</mml:mi>
</mml:math>
</inline-formula> denotes the sigmoid function and MLP is a shared two-layer perceptron. The refined output <inline-formula>
<mml:math id="M4">
<mml:msup>
<mml:mi>F</mml:mi>
<mml:mo>&#x2032;</mml:mo>
</mml:msup>
</mml:math>
</inline-formula> is then passed through the spatial attention module, which computes:<disp-formula id="E2">
<mml:math id="M5">
<mml:msub>
<mml:mi>M</mml:mi>
<mml:mi>s</mml:mi>
</mml:msub>
<mml:mo stretchy="true">(</mml:mo>
<mml:msup>
<mml:mi>F</mml:mi>
<mml:mo>&#x2032;</mml:mo>
</mml:msup>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo>=</mml:mo>
<mml:mi>&#x03C3;</mml:mi>
<mml:mo stretchy="true">(</mml:mo>
<mml:msup>
<mml:mi>f</mml:mi>
<mml:mrow>
<mml:mn>7</mml:mn>
<mml:mo>&#x00D7;</mml:mo>
<mml:mn>7</mml:mn>
</mml:mrow>
</mml:msup>
<mml:mo stretchy="true">(</mml:mo>
<mml:mo stretchy="true">[</mml:mo>
<mml:mtext mathvariant="italic">Avgpool</mml:mtext>
<mml:mo stretchy="true">(</mml:mo>
<mml:msup>
<mml:mi>F</mml:mi>
<mml:mo>&#x2032;</mml:mo>
</mml:msup>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo>;</mml:mo>
<mml:mtext mathvariant="italic">Maxpool</mml:mtext>
<mml:mo stretchy="true">(</mml:mo>
<mml:msup>
<mml:mi>F</mml:mi>
<mml:mo>&#x2032;</mml:mo>
</mml:msup>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo stretchy="true">]</mml:mo>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo stretchy="true">)</mml:mo>
</mml:math>
</disp-formula></p>
<p>The output from the spatial attention is multiplied with the input to generate the final attention-refined skip feature. In addition to CBAM, attention gates are applied in parallel to further enhance the spatial selectivity of the skip connections by conditioning on the decoder&#x2019;s coarser outputs. This dual-attention mechanism helps suppress irrelevant activations and improves the segmentation precision.</p>
</sec>
<sec id="sec7">
<label>2.2.3</label>
<title>Multi-output deep supervision</title>
<p>The decoder generates auxiliary outputs at multiple stages to facilitate deep supervision. Each auxiliary output provides an intermediate lesion probability map at a different spatial scale. These outputs are used during training to propagate gradients more effectively, which is particularly important for cerebral microbleeds that are small in size and sparsely distributed.</p>
<p>By providing additional supervision signals at intermediate decoder levels, deep supervision helps stabilize the training process and alleviates optimization difficulties commonly encountered in small-lesion segmentation tasks. While this strategy can lead to incremental improvements in final segmentation performance, its primary role in this work is to enhance training stability and improve gradient flow rather than to fundamentally alter the representational capacity of the network.</p>
<p>The final segmentation output is derived from the highest-resolution decoder stage and is supervised together with the auxiliary outputs using a combined loss function, with greater emphasis placed on the final output during training.</p>
</sec>
</sec>
<sec id="sec8">
<label>2.3</label>
<title>Loss function</title>
<p>To effectively supervise the segmentation of cerebral microbleeds (CMBs), which are typically small in size and sparsely distributed, we adopt a composite loss function that combines Dice Loss and Focal Loss. This formulation is designed to balance the need for high sensitivity with the suppression of false positives, addressing the class imbalance inherent in lesion segmentation tasks.</p>
<p>The Dice loss <inline-formula>
<mml:math id="M6">
<mml:msub>
<mml:mi>&#x2112;</mml:mi>
<mml:mtext mathvariant="italic">Dice</mml:mtext>
</mml:msub>
</mml:math>
</inline-formula> directly optimizes the overlap between the predicted and ground truth masks, and is particularly effective for handling small lesion regions:</p>
<disp-formula id="E3">
<mml:math id="M7">
<mml:msub>
<mml:mi mathvariant="script">L</mml:mi>
<mml:mtext mathvariant="italic">Dice</mml:mtext>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:msub>
<mml:mo>&#x2211;</mml:mo>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>g</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:mi>&#x03F5;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:msub>
<mml:mo>&#x2211;</mml:mo>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:msub>
<mml:mo>&#x2211;</mml:mo>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:msub>
<mml:mi>g</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>+</mml:mo>
<mml:mi>&#x03F5;</mml:mi>
</mml:mrow>
</mml:mfrac>
</mml:math>
</disp-formula>
<p>where <inline-formula>
<mml:math id="M8">
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> and <inline-formula>
<mml:math id="M9">
<mml:msub>
<mml:mi>g</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> denote the predicted probability and ground truth label for voxel <inline-formula>
<mml:math id="M10">
<mml:mi>i</mml:mi>
</mml:math>
</inline-formula>, respectively, and <inline-formula>
<mml:math id="M11">
<mml:mi>&#x03F5;</mml:mi>
</mml:math>
</inline-formula> is a small constant for numerical stability.</p>
<p>To further enhance the model&#x2019;s focus on hard-to-classify voxels, the Focal Loss <inline-formula>
<mml:math id="M12">
<mml:msub>
<mml:mi>&#x2112;</mml:mi>
<mml:mtext mathvariant="italic">Focal</mml:mtext>
</mml:msub>
</mml:math>
</inline-formula> is employed, defined as:</p>
<disp-formula id="E4">
<mml:math id="M13">
<mml:msub>
<mml:mi>&#x2112;</mml:mi>
<mml:mtext mathvariant="italic">Focal</mml:mtext>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>&#x03B1;</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:msup>
<mml:mrow>
<mml:mo stretchy="true">(</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo stretchy="true">)</mml:mo>
</mml:mrow>
<mml:mi>&#x03B3;</mml:mi>
</mml:msup>
<mml:mo>log</mml:mo>
<mml:mo stretchy="true">(</mml:mo>
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
<mml:mo stretchy="true">)</mml:mo>
</mml:math>
</disp-formula>
<p>where <inline-formula>
<mml:math id="M14">
<mml:msub>
<mml:mi>p</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> is the predicted probability corresponding to the true class, <inline-formula>
<mml:math id="M15">
<mml:msub>
<mml:mi>&#x03B1;</mml:mi>
<mml:mi>t</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> is a balancing parameter, and <inline-formula>
<mml:math id="M16">
<mml:mi>&#x03B3;</mml:mi>
</mml:math>
</inline-formula> is the focusing parameter. In our experiments, we set <inline-formula>
<mml:math id="M17">
<mml:mi>&#x03B3;</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>2.0</mml:mn>
</mml:math>
</inline-formula> to emphasize difficult samples and reduce the influence of well-classified background voxels.</p>
<p>Importantly, although subjects without cerebral microbleeds (zero-CMB cases) were not explicitly included during training, the use of Focal loss effectively mitigates the dominance of background voxels by down-weighting easy negative samples. This design encourages the network to focus on hard-to-classify lesion candidates and suppress spurious false positives, thereby alleviating class imbalance even in the absence of explicit negative-only subjects during training.</p>
<p>The final training loss <inline-formula>
<mml:math id="M18">
<mml:msub>
<mml:mi>&#x2112;</mml:mi>
<mml:mtext mathvariant="italic">total</mml:mtext>
</mml:msub>
</mml:math>
</inline-formula> is the weighted sum of Dice and Focal losses, computed across the main output and three auxiliary outputs from the decoder:</p>
<disp-formula id="E5">
<mml:math id="M19">
<mml:msub>
<mml:mi mathvariant="script">L</mml:mi>
<mml:mtext mathvariant="italic">total</mml:mtext>
</mml:msub>
<mml:mo>=</mml:mo>
<mml:munderover>
<mml:mo movablelimits="false">&#x2211;</mml:mo>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>=</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mn>4</mml:mn>
</mml:munderover>
<mml:mo stretchy="true">(</mml:mo>
<mml:msub>
<mml:mi>&#x03BB;</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
<mml:mo>&#x22C5;</mml:mo>
<mml:mo stretchy="true">(</mml:mo>
<mml:msubsup>
<mml:mi mathvariant="script">L</mml:mi>
<mml:mtext mathvariant="italic">Dice</mml:mtext>
<mml:mrow>
<mml:mo stretchy="true">(</mml:mo>
<mml:mi>k</mml:mi>
<mml:mo stretchy="true">)</mml:mo>
</mml:mrow>
</mml:msubsup>
<mml:mo>+</mml:mo>
<mml:msubsup>
<mml:mi mathvariant="script">L</mml:mi>
<mml:mtext mathvariant="italic">Focal</mml:mtext>
<mml:mrow>
<mml:mo stretchy="true">(</mml:mo>
<mml:mi>k</mml:mi>
<mml:mo stretchy="true">)</mml:mo>
</mml:mrow>
</mml:msubsup>
<mml:mo stretchy="true">)</mml:mo>
<mml:mo stretchy="true">)</mml:mo>
</mml:math>
</disp-formula>
<p>where <inline-formula>
<mml:math id="M20">
<mml:msub>
<mml:mi>&#x03BB;</mml:mi>
<mml:mi>k</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> denotes the weight assigned to the output at the <italic>k</italic>-th decoder level. In our implementation, the weights were set to <italic>&#x03BB;</italic>&#x202F;=&#x202F;{0.4,0.3,0.2,0.1} from the highest-resolution (final) output to the deepest auxiliary output, respectively. This weighting scheme places greater emphasis on the final prediction while allowing auxiliary outputs to contribute to training stability through deep supervision.</p>
</sec>
</sec>
<sec id="sec9">
<label>3</label>
<title>Experiments</title>
<sec id="sec10">
<label>3.1</label>
<title>Data</title>
<p>This study utilized a total of 506 brain MRI scans collected from three sources: the MICCAI2021 Challenge dataset, a multi-center cohort referred to as Cavas, and clinical data from Yongin Severance Hospital. The combined dataset includes both T2&#x002A;-weighted Gradient Echo (T2&#x002A;-GRE) and Susceptibility Weighted Imaging (SWI) sequences, reflecting real-world variability in acquisition protocols and scanner manufacturers. For evaluation, a test set comprising 72 subjects was constructed, while the remaining scans were used for training. Notably, the test set included subjects without cerebral microbleeds, ensuring a comprehensive assessment of the model&#x2019;s ability to distinguish between positive and negative cases.</p>
<sec id="sec11">
<label>3.1.1</label>
<title>MICCAI2021 challenge dataset</title>
<p>The MICCAI2021 dataset comprises 72 scans, of which 50 contain cerebral microbleeds, from three sub-cohorts: SABRE (11 scans), RSS (34 scans), and ALFA (27 scans) (<xref ref-type="bibr" rid="ref20">Sudre et al., 2024</xref>). These images were acquired using either 1.5&#x202F;T or 3&#x202F;T MRI scanners (e.g., Philips Achieva, GE Discovery), using both 2D and 3D T2&#x002A;-GRE protocols. Imaging parameters vary across cohorts, with slice thickness ranging from 0.8&#x202F;mm to 3.0&#x202F;mm and in-plane resolutions reaching up to 0.45&#x202F;&#x00D7;&#x202F;0.45&#x202F;mm<sup>2</sup>. The ground truth CMB annotations were provided by the challenge organizers as part of the MICCAI2021 Small Vessel Disease Segmentation Challenge. Ethical approval for the use of this dataset can be referenced in the corresponding original study.</p>
</sec>
<sec id="sec12">
<label>3.1.2</label>
<title>Cavas dataset</title>
<p>CAVAS is a multi-cohort study focusing on normal aging in the rural population. Initial analysis was performed on a total of 270 scans, from which 154 scans exhibiting microbleeds were selected to form the CAVAS dataset, comprising five sub-cohorts: HY (21 scans), PH (55 scans), CN (43 scans), KB (22 scans), and GH (13 scans). These images were acquired using 3&#x202F;T MRI scanners (e.g., Philips Achieva, Siemens Skyra) with a 2D T2&#x002A;-GRE protocol. Imaging parameters include a slice thickness ranging from 4.0&#x202F;mm to 7.0&#x202F;mm and in-plane resolutions ranging from 0.80&#x202F;&#x00D7;&#x202F;0.80&#x202F;mm<sup>2</sup> to 0.89&#x202F;&#x00D7;&#x202F;0.89&#x202F;mm<sup>2</sup>. The ground truth CMB annotations were provided by two neurologists participating in the in-depth aging survey research collaboration for rural-based cohorts. All data acquisitions were approved by the institutional review boards of the respective centers: Hanyang University (HYUIRB-202011-012), Chonnam National University (06&#x2013;062), Keimyung University (2020&#x2013;01-058), Wonju Yonsei University College of Medicine (CR320120), and Yonsei University Medical School (4&#x2013;2020-0817).</p>
</sec>
<sec id="sec13">
<label>3.1.3</label>
<title>Yongin severance dataset</title>
<p>The Yongin Severance dataset consists of 374 brain MRI scans, of which 294 exhibit microbleeds. These scans were acquired using a 3D SWI protocol on two types of 3&#x202F;T scanners: Philips Elition and Philips Ingenia CX. All scans were obtained using standardized acquisition parameters with a repetition time (TR) of 51&#x202F;ms and echo time (TE) of 9.8&#x202F;ms. The slice thickness was set to 2.0&#x202F;mm. The in-plane acquisition resolution was 0.60&#x202F;&#x00D7;&#x202F;0.60&#x202F;mm<sup>2</sup>, and the reconstructed resolution was refined to 0.45&#x202F;&#x00D7;&#x202F;0.45&#x202F;mm<sup>2</sup> with 1.0&#x202F;mm slice thickness. The dataset was collected under the approval of the Yongin Severance Hospital Institutional Review Board (eIRB No. 9&#x2013;2025-0165). Ground truth CMB annotations were established through a semi-automated procedure. An initial segmentation model trained on the MICCAI and CAVAS datasets was used to identify potential lesions. Experienced clinicians iteratively reviewed the outputs to eliminate false positives. For the final test set, manual segmentation was independently performed by board-certified neuroradiologists to ensure high annotation accuracy and reliability.</p>
</sec>
</sec>
<sec id="sec14">
<label>3.2</label>
<title>Evaluation metrics</title>
<p>To evaluate the lesion-level performance of the proposed segmentation model, four standard metrics were employed: precision, recall, F1-score, and the average number of false positives per scan (<inline-formula>
<mml:math id="M21">
<mml:mi>F</mml:mi>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi mathvariant="italic">avg</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>). These metrics were selected to comprehensively assess both sensitivity and specificity, as well as the practical viability of the model in clinical applications.</p>
<p>All evaluation metrics were computed on a lesion-wise basis, where a detected lesion was considered a true positive if the Euclidean distance between its centroid and the nearest ground truth lesion was within 4 voxels. This threshold was selected to account for small lesion sizes and spatial localization uncertainty during annotation.</p>
</sec>
<sec id="sec15">
<label>3.3</label>
<title>Settings</title>
<p>To implement and evaluate the proposed model, all experiments were conducted in a controlled computational environment summarized in <xref ref-type="table" rid="tab1">Table 1</xref>.</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Summary of the training environment and hyperparameter configuration used in all experiments.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Software/Hardware</th>
<th align="left" valign="top">Model/Parameter</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">GPU</td>
<td align="left" valign="top">NVIDIA RTX A5000 Ada</td>
</tr>
<tr>
<td align="left" valign="top">Framework</td>
<td align="left" valign="top">Pytorch 1.13</td>
</tr>
<tr>
<td align="left" valign="top">Optimizer</td>
<td align="left" valign="top">AdamW</td>
</tr>
<tr>
<td align="left" valign="top">Initial learning rate</td>
<td align="left" valign="top">1e-4 (with warm-up from 1e-7)</td>
</tr>
<tr>
<td align="left" valign="top">Scheduler</td>
<td align="left" valign="top">Cosine Annealing Warmup Restarts</td>
</tr>
<tr>
<td align="left" valign="top">Epochs</td>
<td align="left" valign="top">1,000</td>
</tr>
<tr>
<td align="left" valign="top">Batch size</td>
<td align="left" valign="top">2</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>A warm-up strategy was applied at the beginning of training, where the learning rate was gradually increased from 1e-7 to 1e-4 over the first 10 epochs. After the warm-up phase, a cosine annealing schedule with warm restarts was used to adjust the learning rate dynamically throughout the remaining training epochs.</p>
<p>The model was trained using 2D slice-based input with a batch size of 2 for 1,000 epochs. All experiments were performed on a single NVIDIA RTX A5000 Ada GPU. At inference time, the proposed model required an average of 0.22&#x202F;s per subject, measured on the test set. This runtime includes forward inference and post-processing for lesion detection.</p>
<p>Early stopping was not applied during training. Instead, model selection was performed based on validation performance within each fold of the K-fold cross-validation framework. Specifically, for each fold, the checkpoint achieving the highest validation F1-score was retained and used for final evaluation. Training and validation curves were monitored to ensure stable optimization, and the best-performing checkpoints were not consistently obtained at the final epoch, indicating no systematic overfitting during prolonged training.</p>
</sec>
</sec>
<sec sec-type="results" id="sec16">
<label>4</label>
<title>Results and discussion</title>
<sec id="sec17">
<label>4.1</label>
<title>Quantitative evaluation</title>
<p>To assess the effectiveness of the proposed method, we conducted a comparative evaluation against existing cerebral microbleed (CMB) detection approaches, including both 3D CNN-based and segmentation-based models. The compared models were trained using the dataset employed in this study, with training conducted under conditions that reflected the environments originally proposed for each model. The evaluation metrics include precision, sensitivity (recall), F1-score, and the average number of false positives per subject (<inline-formula>
<mml:math id="M22">
<mml:mi>F</mml:mi>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi mathvariant="italic">avg</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>). A detailed summary of these results is provided in <xref ref-type="table" rid="tab2">Table 2</xref>.</p>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Comparison of detection performance between the proposed method and existing CMB detection approaches (2D lesion-level).</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Method</th>
<th align="center" valign="top">Precision</th>
<th align="center" valign="top">Sensitivity (Recall)</th>
<th align="center" valign="top">F1-score</th>
<th align="center" valign="top">
<italic>FP<sub>avg</sub></italic>
</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref6">Dou et al. (2016)</xref>
</td>
<td align="char" valign="top" char=".">0.521</td>
<td align="char" valign="top" char=".">0.740</td>
<td align="char" valign="top" char=".">0.611</td>
<td align="char" valign="top" char=".">5.22</td>
</tr>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref7">Fan et al. (2022)</xref>
</td>
<td align="char" valign="top" char=".">0.714</td>
<td align="char" valign="top" char=".">0.765</td>
<td align="char" valign="top" char=".">0.739</td>
<td align="char" valign="top" char=".">2.35</td>
</tr>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref23">Wei et al. (2022)</xref>
</td>
<td align="char" valign="top" char=".">0.822</td>
<td align="char" valign="top" char=".">0.863</td>
<td align="char" valign="top" char=".">0.842</td>
<td align="char" valign="top" char=".">1.43</td>
</tr>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref21">Tsuchida et al. (2024)</xref>
</td>
<td align="char" valign="top" char=".">0.767</td>
<td align="char" valign="top" char=".">0.730</td>
<td align="char" valign="top" char=".">0.749</td>
<td align="char" valign="top" char=".">1.71</td>
</tr>
<tr>
<td align="left" valign="top">Ours</td>
<td align="char" valign="top" char="."><bold>0.891</bold></td>
<td align="char" valign="top" char="."><bold>0.887</bold></td>
<td align="char" valign="top" char="."><bold>0.887</bold></td>
<td align="char" valign="top" char="."><bold>0.83</bold></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>The bold values indicate the performance of the proposed model.</p>
</table-wrap-foot>
</table-wrap>
<p>The proposed method demonstrates superior performance across all evaluation criteria. In terms of precision, our model achieves a value of 0.891, which is substantially higher than that of Dou et al. (0.521), Fan et al. (0.714), and even more recent methods such as Wei et al. (0.822) and Tsuchida et al. (0.767). This indicates that the proposed model is highly effective at reducing false positive predictions while maintaining accurate identification of true CMBs. Sensitivity also improves noticeably, reaching 0.887, which suggests the model is capable of detecting a large proportion of true lesions. Compared to other methods, which show recall values in the range of 0.730 to 0.863, this represents a meaningful gain in lesion retrieval ability.</p>
<p>The F1-score, which reflects the balance between precision and recall, is also the highest among all compared methods, with our model achieving a score of 0.887. This confirms that the model does not sacrifice sensitivity for precision or vice versa, but rather achieves a strong trade-off between both. Furthermore, the average number of false positives per subject (<inline-formula>
<mml:math id="M24">
<mml:mi>F</mml:mi>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi mathvariant="italic">avg</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>) is significantly reduced to 0.83, while previous methods report considerably higher values, such as 5.22 for Dou et al., 2.35 for Fan et al., and 1.71 for Tsuchida et al. This dramatic reduction in <inline-formula>
<mml:math id="M25">
<mml:mi>F</mml:mi>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi mathvariant="italic">avg</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> highlights the model&#x2019;s robustness and its ability to suppress incorrect activations.</p>
<p>Overall, the consistent improvements in all metrics underscore the practical advantages of integrating CBAM into the RLK-UNet architecture. The attention mechanism contributes to better localization and discrimination of lesion-relevant features while reducing interference from non-lesion signals. These results strongly support the effectiveness of the proposed method for reliable and accurate detection of CMBs in complex neuroimaging data.</p>
</sec>
<sec id="sec18">
<label>4.2</label>
<title>Cross-validation performance analysis</title>
<p>To evaluate the generalizability and robustness of the proposed RLK-UNet with CBAM, we conducted a 5-fold cross-validation on the dataset. <xref ref-type="table" rid="tab3">Table 3</xref> presents the detailed lesion-level detection performance for each fold in terms of precision, sensitivity (recall), F1-score, and the average number of false positives per subject (<inline-formula>
<mml:math id="M26">
<mml:mi>F</mml:mi>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi mathvariant="italic">avg</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>). Across all folds, the model consistently demonstrated high detection accuracy with an average precision of 0.891<inline-formula>
<mml:math id="M27">
<mml:mo>&#x00B1;</mml:mo>
</mml:math>
</inline-formula>0.015, sensitivity of 0.887<inline-formula>
<mml:math id="M28">
<mml:mo>&#x00B1;</mml:mo>
</mml:math>
</inline-formula>0.011, and F1-score of 0.887<inline-formula>
<mml:math id="M29">
<mml:mo>&#x00B1;</mml:mo>
</mml:math>
</inline-formula>0.013, while maintaining a low false positive rate of <inline-formula>
<mml:math id="M30">
<mml:mi>F</mml:mi>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi mathvariant="italic">avg</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>= 0.83<inline-formula>
<mml:math id="M31">
<mml:mo>&#x00B1;</mml:mo>
</mml:math>
</inline-formula>0.016.</p>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption>
<p>Lesion-level performance across five folds.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Folds</th>
<th align="center" valign="top">Precision</th>
<th align="center" valign="top">Sensitivity (Recall)</th>
<th align="center" valign="top">F1-score</th>
<th align="center" valign="top">
<italic>FP<sub>avg</sub></italic>
</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Fold 1</td>
<td align="center" valign="top">0.901</td>
<td align="center" valign="top">0.893</td>
<td align="center" valign="top">0.897</td>
<td align="center" valign="top">0.75</td>
</tr>
<tr>
<td align="left" valign="top">Fold 2</td>
<td align="center" valign="top">0.889</td>
<td align="center" valign="top">0.884</td>
<td align="center" valign="top">0.886</td>
<td align="center" valign="top">0.85</td>
</tr>
<tr>
<td align="left" valign="top">Fold 3</td>
<td align="center" valign="top">0.873</td>
<td align="center" valign="top">0.870</td>
<td align="center" valign="top">0.871</td>
<td align="center" valign="top">0.97</td>
</tr>
<tr>
<td align="left" valign="top">Fold 4</td>
<td align="center" valign="top">0.910</td>
<td align="center" valign="top">0.897</td>
<td align="center" valign="top">0.903</td>
<td align="center" valign="top">0.68</td>
</tr>
<tr>
<td align="left" valign="top">Fold 5</td>
<td align="center" valign="top">0.882</td>
<td align="center" valign="top">0.893</td>
<td align="center" valign="top">0.880</td>
<td align="center" valign="top">0.90</td>
</tr>
<tr>
<td align="left" valign="top">Average</td>
<td align="center" valign="top"><bold>0.891</bold>
<inline-formula>
<mml:math id="M33">
<mml:mo>&#x00B1;</mml:mo>
</mml:math>
</inline-formula>
<bold>0.015</bold></td>
<td align="center" valign="top"><bold>0.887</bold>
<inline-formula>
<mml:math id="M34">
<mml:mo>&#x00B1;</mml:mo>
</mml:math>
</inline-formula>
<bold>0.011</bold></td>
<td align="center" valign="top"><bold>0.887</bold>
<inline-formula>
<mml:math id="M35">
<mml:mo>&#x00B1;</mml:mo>
</mml:math>
</inline-formula>
<bold>0.013</bold></td>
<td align="center" valign="top"><bold>0.83</bold>
<inline-formula>
<mml:math id="M36">
<mml:mo>&#x00B1;</mml:mo>
</mml:math>
</inline-formula>
<bold>0.016</bold></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>The bold values indicate the performance of the proposed model.</p>
</table-wrap-foot>
</table-wrap>
<p>Notably, the variation across folds was minimal, indicating stable performance regardless of data split. Fold 4 achieved the best results across all metrics, with the highest precision (0.910) and F1-score (0.903), as well as the lowest <inline-formula>
<mml:math id="M37">
<mml:mi>F</mml:mi>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi mathvariant="italic">avg</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> (0.68). These findings support the robustness of the proposed model architecture and the effectiveness of incorporating CBAM into the skip connections for reducing false positives while maintaining sensitivity. The low standard deviation across metrics further suggests that the model can reliably detect cerebral microbleeds across different subsets of the data.</p>
</sec>
<sec id="sec19">
<label>4.3</label>
<title>Performance analysis on small lesions (<inline-formula>
<mml:math id="M38">
<mml:mo>&#x2264;</mml:mo>
</mml:math>
</inline-formula> 3&#x202F;mm)</title>
<p>To assess the performance of our model on small cerebral microbleeds (CMBs), we conducted a comparative analysis using lesions with a maximum diameter of 3&#x202F;mm. <xref ref-type="table" rid="tab4">Table 4</xref> summarizes the quantitative results across different methods. Our model achieved the highest performance across all key metrics, with a precision of 0.854, sensitivity of 0.883, and F1-score of 0.869. It also reported the lowest average number of false positives per subject (<italic>FP<sub>avg</sub></italic>&#x202F;=&#x202F;0.91), indicating effective suppression of incorrect detections.</p>
<table-wrap position="float" id="tab4">
<label>Table 4</label>
<caption>
<p>Quantitative comparison of lesion-level detection performance for cerebral microbleeds (CMBs)&#x202F;&#x2264;&#x202F;3&#x202F;mm in diameter.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Method</th>
<th align="center" valign="top">Precision</th>
<th align="center" valign="top">Sensitivity (Recall)</th>
<th align="center" valign="top">F1-score</th>
<th align="center" valign="top">
<italic>FP<sub>avg</sub></italic>
</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref6">Dou et al. (2016)</xref>
</td>
<td align="char" valign="top" char=".">0.492</td>
<td align="char" valign="top" char=".">0.731</td>
<td align="char" valign="top" char=".">0.587</td>
<td align="char" valign="top" char=".">5.31</td>
</tr>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref7">Fan et al. (2022)</xref>
</td>
<td align="char" valign="top" char=".">0.773</td>
<td align="char" valign="top" char=".">0.754</td>
<td align="char" valign="top" char=".">0.763</td>
<td align="char" valign="top" char=".">2.55</td>
</tr>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref23">Wei et al. (2022)</xref>
</td>
<td align="char" valign="top" char=".">0.805</td>
<td align="char" valign="top" char=".">0.849</td>
<td align="char" valign="top" char=".">0.826</td>
<td align="char" valign="top" char=".">1.66</td>
</tr>
<tr>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref21">Tsuchida et al. (2024)</xref>
</td>
<td align="char" valign="top" char=".">0.747</td>
<td align="char" valign="top" char=".">0.717</td>
<td align="char" valign="top" char=".">0.732</td>
<td align="char" valign="top" char=".">1.91</td>
</tr>
<tr>
<td align="left" valign="top">Ours</td>
<td align="char" valign="top" char="."><bold>0.854</bold></td>
<td align="char" valign="top" char="."><bold>0.883</bold></td>
<td align="char" valign="top" char="."><bold>0.869</bold></td>
<td align="char" valign="top" char="."><bold>0.91</bold></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>The bold values indicate the performance of the proposed model.</p>
</table-wrap-foot>
</table-wrap>
<p>Compared to the method by Wei et al., our model showed an F1-score improvement of 0.043 and a substantial reduction in <italic>FP<sub>avg</sub></italic> from 1.66 to 0.91, demonstrating its ability to detect subtle and low-contrast lesions with higher precision. These results suggest that the attention-based skip connection integrated into our model effectively enhances the network&#x2019;s focus on small lesion features.</p>
<p>Overall, the proposed model provides accurate and reliable detection performance even for tiny lesions, supporting its potential for clinical application in scenarios where high sensitivity and precision are essential.</p>
</sec>
<sec id="sec20">
<label>4.4</label>
<title>Qualitative evaluation</title>
<p>To further evaluate the performance of the proposed model, qualitative visualization results are presented in <xref ref-type="fig" rid="fig2">Figures 2</xref>, <xref ref-type="fig" rid="fig3">3</xref>. These figures provide representative examples across a diverse range of imaging appearances and lesion characteristics.</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Visual comparison of detection results on representative brain slices. The first row shows original T2&#x002A;-GRE or SWI images, and the second row displays prediction results. True positives (blue), false positives (yellow), and false negatives (red) are color-coded for visual clarity.</p>
</caption>
<graphic xlink:href="fnins-20-1743039-g002.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Grid of sixteen brain MRI scans in two rows, where the top row shows original images and the bottom row highlights findings with colored dots: red for false negatives, yellow for false positives, and blue for true positives, following the legend at the top.</alt-text>
</graphic>
</fig>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Qualitative visualization of detection results for very small cerebral microbleeds (&#x003C;3&#x202F;mm). The examples highlight that the proposed model successfully detects subtle lesions occupying only a few voxels. Although minor discrepancies in lesion size or boundary may occur, the model consistently localizes microbleeds at the correct anatomical sites.</p>
</caption>
<graphic xlink:href="fnins-20-1743039-g003.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Four brain MRI scan slices arranged in a two-by-two grid, with the lower images showing red highlighted areas and magnified insets indicating specific points of interest in the brain tissue.</alt-text>
</graphic>
</fig>
<p><xref ref-type="fig" rid="fig2">Figure 2</xref> illustrates qualitative detection results for multiple subjects, where each column corresponds to an axial slice from a different case. The top row shows the original T2&#x002A;-GRE or SWI images without annotations, serving as a reference for image contrast and quality. The bottom row overlays the model&#x2019;s prediction results, with detections color-coded as follows: true positives (TP, blue), false positives (FP, yellow), and false negatives (FN, red).</p>
<p>As observed in <xref ref-type="fig" rid="fig2">Figure 2</xref>, the proposed model successfully detects the majority of cerebral microbleeds with high localization accuracy. Most true positive detections are well aligned with the manually annotated ground truth, indicating precise lesion localization. While minor variations in lesion shape or size may be present, the predicted lesions generally exhibit strong visual correspondence with the ground truth annotations. A small number of false positives are observed, typically arising from low-intensity regions or imaging artifacts that visually resemble microbleeds. False negatives are relatively rare and, when present, are primarily associated with extremely small or faint lesions.</p>
<p>While <xref ref-type="fig" rid="fig2">Figure 2</xref> demonstrates the overall detection behavior of the proposed method, <xref ref-type="fig" rid="fig3">Figure 3</xref> provides additional qualitative examples focusing specifically on very small cerebral microbleeds (&#x003C;3&#x202F;mm), which are known to be particularly challenging to detect even for experienced human readers. In these cases, lesions occupy only a few voxels and often exhibit low contrast relative to surrounding tissue.</p>
<p>As shown in <xref ref-type="fig" rid="fig3">Figure 3</xref>, the proposed model is able to reliably localize the presence of these very small microbleeds, even when slight discrepancies in lesion extent or boundary definition are observed. Although the predicted segmentation may not perfectly match the precise voxel-level annotations, the model consistently identifies the correct anatomical locations of the lesions. This behavior suggests that the proposed approach prioritizes robust lesion detection over exact boundary delineation in extremely small lesions, which is appropriate for automated CMB screening and burden assessment tasks.</p>
<p>Overall, the qualitative results presented in <xref ref-type="fig" rid="fig2">Figures 2</xref>, <xref ref-type="fig" rid="fig3">3</xref> are consistent with the quantitative findings reported earlier, demonstrating that the attention-enhanced architecture effectively captures subtle CMB patterns while suppressing spurious activations, even in challenging small-lesion scenarios.</p>
</sec>
<sec id="sec21">
<label>4.5</label>
<title>Cross-modality generalization between T2&#x002A;-GRE and SWI</title>
<p>To evaluate the robustness and generalizability of the proposed method across different imaging modalities, we conducted a cross-modality evaluation between T2&#x002A;-weighted gradient echo (T2&#x002A;-GRE) and susceptibility-weighted imaging (SWI), as summarized in <xref ref-type="table" rid="tab5">Table 5</xref>. In this experiment, the model was trained exclusively on one modality and tested on the other, thereby assessing its ability to generalize beyond the training domain.</p>
<table-wrap position="float" id="tab5">
<label>Table 5</label>
<caption>
<p>Cross-modality evaluation results of the proposed method.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Train data</th>
<th align="left" valign="top">Test data</th>
<th align="center" valign="top">Precision</th>
<th align="center" valign="top">Sensitivity (Recall)</th>
<th align="center" valign="top">F1-score</th>
<th align="center" valign="top">
<italic>FP<sub>avg</sub></italic>
</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">T2&#x002A;-GRE</td>
<td align="left" valign="top">SWI</td>
<td align="char" valign="top" char=".">0.758</td>
<td align="char" valign="top" char=".">0.771</td>
<td align="char" valign="top" char=".">0.765</td>
<td align="char" valign="top" char=".">2.18</td>
</tr>
<tr>
<td align="left" valign="top">SWI</td>
<td align="left" valign="top">T2&#x002A;-GRE</td>
<td align="char" valign="top" char=".">0.813</td>
<td align="char" valign="top" char=".">0.827</td>
<td align="char" valign="top" char=".">0.820</td>
<td align="char" valign="top" char=".">1.83</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>The model was trained on one imaging modality (T2&#x002A;-GRE or SWI) and evaluated on the other to assess modality-specific robustness and generalization performance.</p>
</table-wrap-foot>
</table-wrap>
<p>When trained on T2&#x002A;-GRE and tested on SWI, the proposed model achieved a precision of 0.758, a sensitivity of 0.771, and an F1-score of 0.765, with an average false positive rate (<inline-formula>
<mml:math id="M41">
<mml:mi>F</mml:mi>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi mathvariant="italic">avg</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>) of 2.18 per subject. Conversely, training on SWI and testing on T2&#x002A;-GRE resulted in improved performance, yielding a precision of 0.813, a sensitivity of 0.827, an F1-score of 0.820, and a reduced <inline-formula>
<mml:math id="M42">
<mml:mi>F</mml:mi>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi mathvariant="italic">avg</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> of 1.83.</p>
<p>The relatively stronger performance observed when training on SWI can be attributed, at least in part, to the higher number of slices typically available in SWI acquisitions, which provides a larger and more diverse set of training samples. This increased data availability may allow the model to learn more robust representations of susceptibility-related features, which subsequently transfer more effectively to T2&#x002A;-GRE images despite differences in contrast characteristics and artifact profiles.</p>
<p>Importantly, although a moderate performance gap is observed between the two training configurations, the proposed method maintains stable detection performance in both cross-modality settings. This indicates that the model does not rely on modality-specific cues alone, but instead captures modality-invariant characteristics of cerebral microbleeds. Furthermore, when compared with existing CMB detection approaches reported in <xref ref-type="table" rid="tab2">Table 2</xref>, the proposed method demonstrates competitive or superior performance even under cross-modality evaluation, highlighting its strong generalization capability.</p>
<p>Overall, these results suggest that the proposed attention-enhanced architecture exhibits robust cross-modality generalization between T2&#x002A;-GRE and SWI, supporting its potential applicability in heterogeneous clinical environments where imaging protocols and acquisition settings may vary.</p>
</sec>
<sec id="sec22">
<label>4.6</label>
<title>Subject-level prediction of CMB counts aligned with ARIA-H severity framework</title>
<p>To evaluate the clinical relevance and consistency of our model&#x2019;s performance, we analyzed its ability to estimate the number of cerebral microbleeds (CMBs) per subject using four severity intervals defined by the ARIA-H radiographic classification: 0, 1&#x2013;4, 5&#x2013;9, and &#x2265;10 (<xref ref-type="bibr" rid="ref12">Hampel et al., 2023</xref>). This categorization reflects clinically meaningful thresholds, as outlined in anti-amyloid immunotherapy trials where the CMB burden is used to guide treatment eligibility and safety monitoring. <xref ref-type="table" rid="tab6">Table 6</xref> presents the subject-level confusion matrix based on this classification scheme.</p>
<table-wrap position="float" id="tab6">
<label>Table 6</label>
<caption>
<p>Subject-level confusion matrix of predicted versus ground-truth CMB counts categorized by ARIA-H severity intervals.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">GT \ Prediction</th>
<th align="center" valign="top">0</th>
<th align="center" valign="top">1&#x2013;4</th>
<th align="center" valign="top">5&#x2013;9</th>
<th align="center" valign="top">
<inline-formula>
<mml:math id="M43">
<mml:mo>&#x2265;</mml:mo>
<mml:mn>10</mml:mn>
</mml:math>
</inline-formula>
</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">0</td>
<td align="center" valign="top">6</td>
<td align="center" valign="top">2</td>
<td align="center" valign="top">0</td>
<td align="center" valign="top">0</td>
</tr>
<tr>
<td align="left" valign="top">1&#x2013;4</td>
<td align="center" valign="top">2</td>
<td align="center" valign="top">46</td>
<td align="center" valign="top">2</td>
<td align="center" valign="top">0</td>
</tr>
<tr>
<td align="left" valign="top">5&#x2013;9</td>
<td align="center" valign="top">0</td>
<td align="center" valign="top">2</td>
<td align="center" valign="top">6</td>
<td align="center" valign="top">1</td>
</tr>
<tr>
<td align="left" valign="top">
<inline-formula>
<mml:math id="M44">
<mml:mo>&#x2265;</mml:mo>
<mml:mn>10</mml:mn>
</mml:math>
</inline-formula>
</td>
<td align="center" valign="top">0</td>
<td align="center" valign="top">0</td>
<td align="center" valign="top">1</td>
<td align="center" valign="top">4</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The results show that the model&#x2019;s predictions are predominantly aligned with the ground truth. Among the 50 subjects with 1&#x2013;4 true CMBs, 46 (92.0%) were correctly categorized. In the 5&#x2013;9 group, the majority of cases (6 out of 9) were accurately predicted, while minor misclassifications occurred only in adjacent classes (e.g., 1&#x2013;4 or &#x2265;10), which are clinically tolerable given the gradational nature of severity. Importantly, no subject without CMBs was mistakenly predicted to have a moderate (5&#x2013;9) or severe (&#x2265;10) lesion burden. Similarly, all subjects with &#x2265;10 CMBs were either correctly classified or placed in the adjacent group (5&#x2013;9), with no cases underestimated into the 1&#x2013;4 or 0 category.</p>
<p>Such prediction patterns highlight the model&#x2019;s ability to avoid extreme overestimation or underestimation of CMB burden&#x2014;an essential quality for reliable clinical deployment. This behavior suggests that the model is not only detecting individual lesions accurately but also maintaining count-level consistency that reflects the true severity of each subject.</p>
<p>We attribute this strength to the CBAM-based skip connection design integrated into our model. While conventional U-Net skip connections may indiscriminately transfer both relevant and irrelevant features&#x2014;including vessels or background structures&#x2014;our model leverages spatial and channel attention to selectively enhance lesion-specific representations. This mechanism appears to suppress false positives and improve true lesion detection across a wide spectrum of lesion counts.</p>
<p>Overall, the results demonstrate that our model maintains a clinically aligned, component-level understanding of lesion burden. This capability is particularly critical in the context of ARIA-H&#x2013;related safety evaluations, where accurate and conservative quantification of microhemorrhages directly impacts trial eligibility and patient risk stratification.</p>
</sec>
<sec id="sec23">
<label>4.7</label>
<title>Subject-level correlation analysis for CMB burden estimation</title>
<p>To further assess the reliability of the proposed method at the subject level, we performed a correlation analysis between the predicted and ground-truth cerebral microbleed (CMB) counts on the test set (<italic>n</italic>&#x202F;=&#x202F;72). For each subject, the total number of detected CMBs was compared with the manually annotated CMB count.</p>
<p>As shown in <xref ref-type="fig" rid="fig4">Figure 4</xref>, the predicted CMB counts exhibit a strong positive correlation with the ground-truth counts. The Spearman correlation coefficient was <inline-formula>
<mml:math id="M45">
<mml:mi>&#x03C1;</mml:mi>
</mml:math>
</inline-formula>&#x202F;=&#x202F;0.93, indicating a high degree of monotonic agreement between predicted and true CMB burdens. To quantify the statistical reliability of this correlation, a 95% confidence interval (CI) was computed using Fisher z-transformation, yielding a CI of [0.89&#x2013;0.96] (<italic>p</italic>&#x202F;&#x003C;&#x202F;0.001).</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>Subject-level correlation between predicted and ground-truth cerebral microbleed counts on the test set (<italic>n</italic>&#x202F;=&#x202F;72). Each point represents one subject. The regression line and confidence band illustrate the relationship between predicted and true CMB burdens. The Spearman correlation coefficient (<inline-formula>
<mml:math id="M46">
<mml:mi>&#x03C1;</mml:mi>
</mml:math>
</inline-formula>&#x202F;=&#x202F;0.93) with its 95% confidence interval ([0.89&#x2013;0.96]) demonstrates strong and statistically reliable agreement between predicted and ground-truth CMB counts.</p>
</caption>
<graphic xlink:href="fnins-20-1743039-g004.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Scatter plot titled &#x201C;Correlation Plot&#x201D; displays Ground Truth CMB Counts on the x-axis and Prediction CMB Counts on the y-axis. Data points align closely with a fitted regression line, equation y equals 1.007406x plus 0.058234, with correlation coefficient r equals 0.97, indicating strong positive linear correlation.</alt-text>
</graphic>
</fig>
<p>In addition to the correlation coefficient, the regression line demonstrates a slope close to unity and a near-zero intercept, suggesting that the proposed model does not systematically overestimate or underestimate the CMB burden across subjects. While minor dispersion is observed in low-burden cases, the overall trend remains consistent across the entire burden range.</p>
<p>These results indicate that the proposed method provides stable and reliable subject-level CMB burden estimation, supporting the robustness of the reported performance beyond single point estimates and reinforcing its potential applicability for clinical CMB severity assessment.</p>
</sec>
<sec id="sec24">
<label>4.8</label>
<title>Effectiveness of CBAM: quantitative and visual analysis</title>
<p>To assess the impact of the Convolutional Block Attention Module (CBAM) in our RLK-UNet framework, we conducted a series of ablation experiments focused on both quantitative performance and qualitative attention visualization (<xref ref-type="bibr" rid="ref18">Selvaraju et al., 2017</xref>).</p>
<sec id="sec25">
<label>4.8.1</label>
<title>Quantitative evaluation</title>
<p><xref ref-type="table" rid="tab7">Table 7</xref> compares the performance of the model with and without CBAM applied to all skip connections. The inclusion of CBAM yielded a notable improvement across all metrics. Precision increased from 0.783 to 0.891, recall from 0.855 to 0.887, and F1-score from 0.812 to 0.887. Furthermore, the average number of false positives per subject (<inline-formula>
<mml:math id="M47">
<mml:mi>F</mml:mi>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi mathvariant="italic">avg</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>) dropped substantially from 1.80 to 0.83, demonstrating the role of CBAM in suppressing irrelevant activations.</p>
<table-wrap position="float" id="tab7">
<label>Table 7</label>
<caption>
<p>Ablation study comparing the performance with and without CBAM.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Configuration</th>
<th align="center" valign="top">Precision</th>
<th align="center" valign="top">Sensitivity (Recall)</th>
<th align="center" valign="top">F1-score</th>
<th align="center" valign="top">
<italic>FP<sub>avg</sub></italic>
</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Without CBAM</td>
<td align="char" valign="top" char=".">0.783</td>
<td align="char" valign="top" char=".">0.855</td>
<td align="char" valign="top" char=".">0.812</td>
<td align="char" valign="top" char=".">1.80</td>
</tr>
<tr>
<td align="left" valign="top">With CBAM</td>
<td align="char" valign="top" char=".">0.891</td>
<td align="char" valign="top" char=".">0.887</td>
<td align="char" valign="top" char=".">0.887</td>
<td align="char" valign="top" char=".">0.83</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>To further investigate the effect of the number of CBAM modules, we evaluated configurations with CBAM applied to one, two, or all three skip connections. The results are shown in <xref ref-type="table" rid="tab8">Table 8</xref>. As the number of CBAM modules increased, performance consistently improved. Specifically, F1-score improved from 0.850 (CBAM-Skip1) to 0.860 (CBAM-Skip2), and 0.887 (CBAM-AllSkip), while <inline-formula>
<mml:math id="M49">
<mml:mi>F</mml:mi>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi mathvariant="italic">avg</mml:mi>
</mml:msub>
</mml:math>
</inline-formula> decreased from 1.23 to 1.11 and finally 0.83. These findings suggest that the use of CBAM in multiple skip pathways contributes cumulatively to enhanced feature refinement and noise suppression.</p>
<table-wrap position="float" id="tab8">
<label>Table 8</label>
<caption>
<p>Ablation study of CBAM placement in skip connections.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Configuration</th>
<th align="center" valign="top">Precision</th>
<th align="center" valign="top">Sensitivity (Recall)</th>
<th align="center" valign="top">F1-score</th>
<th align="center" valign="top">
<italic>FP<sub>avg</sub></italic>
</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Without CBAM</td>
<td align="char" valign="top" char=".">0.783</td>
<td align="char" valign="top" char=".">0.855</td>
<td align="char" valign="top" char=".">0.812</td>
<td align="char" valign="top" char=".">1.80</td>
</tr>
<tr>
<td align="left" valign="top">CBAM-Skip1</td>
<td align="char" valign="top" char=".">0.841</td>
<td align="char" valign="top" char=".">0.857</td>
<td align="char" valign="top" char=".">0.850</td>
<td align="char" valign="top" char=".">1.23</td>
</tr>
<tr>
<td align="left" valign="top">CBAM-Skip2</td>
<td align="char" valign="top" char=".">0.856</td>
<td align="char" valign="top" char=".">0.864</td>
<td align="char" valign="top" char=".">0.860</td>
<td align="char" valign="top" char=".">1.11</td>
</tr>
<tr>
<td align="left" valign="top">CBAM-AllSkip</td>
<td align="char" valign="top" char=".">0.891</td>
<td align="char" valign="top" char=".">0.887</td>
<td align="char" valign="top" char=".">0.887</td>
<td align="char" valign="top" char=".">0.83</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>The results show how selectively applying CBAM to 1, 2, or all skip connections influences lesion-level detection performance.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="sec26">
<label>4.8.2</label>
<title>Visual analysis</title>
<p>To explore how CBAM affects spatial attention, we visualized Grad-CAM maps extracted from the final decoder block, comparing the model without CBAM and the model with CBAM applied to all skip connections, as shown in <xref ref-type="fig" rid="fig5">Figures 5</xref>, <xref ref-type="fig" rid="fig6">6</xref>.</p>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption>
<p>Grad-CAM visualizations comparing models without (top row) and with (bottom row) CBAM. Each pair shows the same axial brain slice. CBAM enhances attention localization near true microbleeds and suppresses irrelevant activations in the background. The color bar indicates activation intensity, ranging from low (blue) to high (red).</p>
</caption>
<graphic xlink:href="fnins-20-1743039-g005.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Two rows of six heatmap-style brain scan images compare CBAM X (top row) and CBAM O (bottom row) attention mechanisms, with a color bar on the right indicating intensity from blue (lowest) to red (highest).</alt-text>
</graphic>
</fig>
<fig position="float" id="fig6">
<label>Figure 6</label>
<caption>
<p>Enlarged Grad-CAM visualizations of small cerebral microbleeds, illustrating differences in attention localization between models without CBAM and with CBAM applied to skip connections.</p>
</caption>
<graphic xlink:href="fnins-20-1743039-g006.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Four brain scan images compare CBAM X (top row) and CBAM O (bottom row) models, each with a magnified inset highlighting specific brain regions, demonstrating differences in image feature localization and clarity.</alt-text>
</graphic>
</fig>
<p><xref ref-type="fig" rid="fig5">Figure 5</xref> presents representative Grad-CAM visualizations across multiple subjects. In the absence of CBAM, the attention maps exhibit relatively diffuse activation patterns that frequently extend into non-lesion or background regions, which is consistent with the higher number of false positives observed quantitatively. In contrast, when CBAM is incorporated into the skip connections, the activation maps become more spatially concentrated and selectively focused on lesion-relevant regions, with reduced off-target responses.</p>
<p>To further improve the interpretability of these attention patterns, <xref ref-type="fig" rid="fig6">Figure 6</xref> provides enlarged views of selected cases, highlighting small cerebral microbleeds with additional zoomed-in regions. These focused visualizations allow clearer inspection of the spatial relationship between the Grad-CAM responses and the underlying lesion locations. Even for small and subtle lesions, the CBAM-equipped model consistently demonstrates localized attention centered on the lesion, whereas the model without CBAM shows broader and less specific activation.</p>
<p>Importantly, while slight discrepancies in the exact spatial extent of the highlighted regions may remain, the CBAM-based model reliably identifies the correct anatomical locations of the lesions. This behavior indicates that CBAM enhances the model&#x2019;s ability to prioritize diagnostically meaningful features rather than diffuse background cues.</p>
<p>Taken together, the quantitative performance improvements and the qualitative Grad-CAM visualizations in <xref ref-type="fig" rid="fig5">Figures 5</xref>, <xref ref-type="fig" rid="fig6">6</xref> confirm that CBAM plays a critical role in improving detection precision and robustness by enhancing spatial and channel-wise feature representation during skip connection fusion.</p>
</sec>
</sec>
</sec>
<sec sec-type="conclusions" id="sec27">
<label>5</label>
<title>Conclusion</title>
<p>In this study, we presented an enhanced deep learning framework for cerebral microbleed (CMB) detection that directly addresses one of the most persistent challenges in this field: the trade-off between sensitivity and precision caused by the visual similarity of CMBs to veins, calcifications, and other susceptibility artifacts. Whereas conventional single-stage segmentation models often propagate these confounding signals through skip connections&#x2014;achieving higher recall at the expense of precision or vice versa&#x2014;our approach redefines the skip pathway as a selective, context-filtered channel. By combining large-kernel residual local convolutions, which capture broad contextual cues necessary to distinguish spherical CMBs from elongated vascular structures, with Convolutional Block Attention Modules (CBAM) embedded in every skip connection, the proposed RLK-UNet suppresses noise-related activations and selectively amplifies lesion-relevant features. This design represents not a simple combination of modules but a principled solution to the root cause of false-positive propagation in U-Net&#x2013;based models.</p>
<p>Comprehensive quantitative evaluation demonstrated that this architecture achieved state-of-the-art performance across all major metrics, with a precision of 0.891, recall of 0.887, F1-score of 0.887, and a markedly reduced average false positive rate (<inline-formula>
<mml:math id="M51">
<mml:mi>F</mml:mi>
<mml:msub>
<mml:mi>P</mml:mi>
<mml:mi mathvariant="italic">avg</mml:mi>
</mml:msub>
</mml:math>
</inline-formula>) of 0.83 per subject. These results significantly outperform prior approaches such as MMOC-Net and SHIVA-CMB, underscoring the effectiveness of context-filtered skip connections for small lesion detection. Ablation experiments confirmed the central role of CBAM in reducing false positives without compromising recall, while Grad-CAM visualizations provided interpretable evidence of how the model suppresses irrelevant structures and concentrates attention on true lesion areas.</p>
<p>Importantly, our model demonstrated clinical relevance by producing subject-level CMB burden estimates that aligned closely with ARIA-H severity categories, a feature directly applicable to the monitoring of patients receiving anti-amyloid therapies. This translational value highlights the potential of our framework not only for research purposes but also as a practical tool for risk stratification, treatment safety evaluation, and long-term disease monitoring in real-world neuroimaging workflows.</p>
<p>Taken together, our findings indicate that RLK-UNet with CBAM offers a robust and clinically meaningful solution for automated CMB detection, balancing precision and sensitivity within a single-stage architecture while maintaining interpretability. In future work, we aim to validate the model on larger, multi-center datasets with diverse acquisition protocols, extend the framework to other small cerebrovascular lesions such as lacunes and enlarged perivascular spaces, and integrate self-supervised learning strategies and uncertainty estimation to further enhance generalizability and clinical trust.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec28">
<title>Data availability statement</title>
<p>The data analyzed in this study is subject to the following licenses/restrictions: some of the datasets are private. Requests to access these datasets should be directed to <email xlink:href="mailto:ljm@hanyang.ac.kr">ljm@hanyang.ac.kr</email>.</p>
</sec>
<sec sec-type="ethics-statement" id="sec29">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the following committees. MICCAI 2021 dataset: SABRE&#x2014;National Research Ethics Service Committee, London-Fulham (14/LO/0108); RSS&#x2014;Population Research Act from the Ministry of Health; ALFA&#x2014;Independent Ethics Committee Parc de Salut Mar Barcelona, registered at <ext-link xlink:href="https://Clinicaltrials.gov" ext-link-type="uri">Clinicaltrials.gov</ext-link> (NCT01835717). Cavas dataset: HY&#x2014;Hanyang University (HYUIRB-202011-012); CN&#x2014;Chonnam National University (06&#x2013;062); KB&#x2014;Keimyung University (2020&#x2013;01-058); PH&#x2014;Wonju Yonsei University College of Medicine (CR320120); GH&#x2014;Yonsei University Medical School (4&#x2013;2020-0817). Yongin Severance dataset: eIRB-9-2025-0165. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec sec-type="author-contributions" id="sec30">
<title>Author contributions</title>
<p>KC: Formal analysis, Methodology, Software, Writing &#x2013; original draft. JJ: Formal analysis, Methodology, Writing &#x2013; original draft, Data curation. SeK: Methodology, Writing &#x2013; original draft, Investigation, Software, Validation. YK: Investigation, Methodology, Conceptualization, Data curation, Writing &#x2013; review &#x0026; editing. Y-MK: Data curation, Investigation, Writing &#x2013; review &#x0026; editing, Validation. MK: Data curation, Investigation, Validation, Writing &#x2013; review &#x0026; editing. M-HS: Data curation, Investigation, Validation, Writing &#x2013; review &#x0026; editing. IC: Data curation, Investigation, Validation, Writing &#x2013; review &#x0026; editing. SaK: Data curation, Investigation, Validation, Writing &#x2013; review &#x0026; editing. HK: Data curation, Investigation, Validation, Writing &#x2013; review &#x0026; editing. CP: Investigation, Validation, Writing &#x2013; review &#x0026; editing, Conceptualization, Methodology. J-ML: Conceptualization, Writing &#x2013; review &#x0026; editing, Funding acquisition, Project administration, Supervision.</p>
</sec>
<sec sec-type="COI-statement" id="sec31">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec32">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec33">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Akoudad</surname><given-names>S.</given-names></name> <name><surname>Wolters</surname><given-names>F. J.</given-names></name> <name><surname>Viswanathan</surname><given-names>A.</given-names></name> <name><surname>de Bruijn</surname><given-names>R. F.</given-names></name> <name><surname>van der Lugt</surname><given-names>A.</given-names></name> <name><surname>Hofman</surname><given-names>A.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Association of cerebral microbleeds with cognitive decline and dementia</article-title>. <source>JAMA Neurol.</source> <volume>73</volume>, <fpage>934</fpage>&#x2013;<lpage>943</lpage>. doi: <pub-id pub-id-type="doi">10.1001/jamaneurol.2016.1017</pub-id>, <pub-id pub-id-type="pmid">27271785</pub-id></mixed-citation></ref>
<ref id="ref2"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Al-Masni</surname><given-names>M. A.</given-names></name> <name><surname>Kim</surname><given-names>W.-R.</given-names></name> <name><surname>Kim</surname><given-names>E. Y.</given-names></name> <name><surname>Noh</surname><given-names>Y.</given-names></name> <name><surname>Kim</surname><given-names>D.-H.</given-names></name></person-group> (<year>2020</year>). <article-title>Automated detection of cerebral microbleeds in MR images: a two-stage deep learning approach</article-title>. <source>Neuroimage Clin.</source> <volume>28</volume>:<fpage>102464</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.nicl.2020.102464</pub-id>, <pub-id pub-id-type="pmid">33395960</pub-id></mixed-citation></ref>
<ref id="ref3"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Barnes</surname><given-names>S. R. S.</given-names></name> <name><surname>Haacke</surname><given-names>E. M.</given-names></name> <name><surname>Ayaz</surname><given-names>M.</given-names></name> <name><surname>Boikov</surname><given-names>A. S.</given-names></name> <name><surname>Kirsch</surname><given-names>W.</given-names></name> <name><surname>Kido</surname><given-names>D.</given-names></name></person-group> (<year>2011</year>). <article-title>Semiautomated detection of cerebral microbleeds in magnetic resonance images</article-title>. <source>Magn. Reson. Imaging</source> <volume>29</volume>, <fpage>844</fpage>&#x2013;<lpage>852</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.mri.2011.02.028</pub-id>, <pub-id pub-id-type="pmid">21571479</pub-id></mixed-citation></ref>
<ref id="ref4"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cordonnier</surname><given-names>C.</given-names></name> <name><surname>Potter</surname><given-names>G. M.</given-names></name> <name><surname>Jackson</surname><given-names>C. A.</given-names></name> <name><surname>Doubal</surname><given-names>F.</given-names></name> <name><surname>Keir</surname><given-names>S.</given-names></name> <name><surname>Sudlow</surname><given-names>C. L. M.</given-names></name> <etal/></person-group>. (<year>2009</year>). <article-title>Improving interrater agreement about brain microbleeds: development of the brain observer MicroBleed scale (BOMBS)</article-title>. <source>Stroke</source> <volume>40</volume>, <fpage>94</fpage>&#x2013;<lpage>99</lpage>. doi: <pub-id pub-id-type="doi">10.1161/STROKEAHA.108.526996</pub-id></mixed-citation></ref>
<ref id="ref5"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cordonnier</surname><given-names>C.</given-names></name> <name><surname>van der Flier</surname><given-names>W. M.</given-names></name></person-group> (<year>2011</year>). <article-title>Brain microbleeds and Alzheimer&#x2019;s disease: innocent observation or key player?</article-title> <source>Brain</source> <volume>134</volume>, <fpage>335</fpage>&#x2013;<lpage>344</lpage>. doi: <pub-id pub-id-type="doi">10.1093/brain/awq321</pub-id>, <pub-id pub-id-type="pmid">21257651</pub-id></mixed-citation></ref>
<ref id="ref6"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dou</surname><given-names>Q.</given-names></name> <name><surname>Chen</surname><given-names>H.</given-names></name> <name><surname>Yu</surname><given-names>L.</given-names></name> <name><surname>Zhao</surname><given-names>L.</given-names></name> <name><surname>Qin</surname><given-names>J.</given-names></name> <name><surname>Wang</surname><given-names>D.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Automatic detection of cerebral microbleeds from MR images via 3D convolutional neural networks</article-title>. <source>IEEE Trans. Med. Imaging</source> <volume>35</volume>, <fpage>1182</fpage>&#x2013;<lpage>1195</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TMI.2016.2528129</pub-id></mixed-citation></ref>
<ref id="ref7"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fan</surname><given-names>P.</given-names></name> <name><surname>Shan</surname><given-names>W.</given-names></name> <name><surname>Yang</surname><given-names>H.</given-names></name> <name><surname>Zheng</surname><given-names>Y.</given-names></name> <name><surname>Wu</surname><given-names>Z.</given-names></name> <name><surname>Chan</surname><given-names>S. W.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Cerebral microbleed automatic detection system based on the &#x201C;deep learning.&#x201D;</article-title>. <source>Front. Med.</source> <volume>9</volume>:<fpage>807443</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fmed.2022.807443</pub-id>, <pub-id pub-id-type="pmid">35402427</pub-id></mixed-citation></ref>
<ref id="ref8"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fazekas</surname><given-names>F.</given-names></name> <name><surname>Kleinert</surname><given-names>R.</given-names></name> <name><surname>Roob</surname><given-names>G.</given-names></name> <name><surname>Kleinert</surname><given-names>G.</given-names></name> <name><surname>Kapeller</surname><given-names>P.</given-names></name> <name><surname>Schmidt</surname><given-names>R.</given-names></name> <etal/></person-group>. (<year>1999</year>). <article-title>Histopathologic analysis of foci of signal loss on gradient-echo T2&#x002A;-weighted MR images in patients with spontaneous intracerebral hemorrhage: evidence of microangiopathy-related microbleeds</article-title>. <source>AJNR Am. J. Neuroradiol.</source> <volume>20</volume>, <fpage>637</fpage>&#x2013;<lpage>642</lpage>.</mixed-citation></ref>
<ref id="ref9"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Ghafaryasl</surname><given-names>B</given-names></name> <name><surname>van der Lijn</surname><given-names>F</given-names></name> <name><surname>Poels</surname><given-names>M</given-names></name> <name><surname>Vrooman</surname><given-names>H</given-names></name> <name><surname>Ikram</surname><given-names>MA</given-names></name> <name><surname>Niessen</surname><given-names>WJ</given-names></name> <etal/></person-group> &#x201C;A computer aided detection system for cerebral microbleeds in brain MRI.&#x201D; In: <italic>2012 9th IEEE international symposium on biomedical imaging (ISBI)</italic>. IEEE; (<year>2012</year>).</mixed-citation></ref>
<ref id="ref10"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Greenberg</surname><given-names>S. M.</given-names></name> <name><surname>Vernooij</surname><given-names>M. W.</given-names></name> <name><surname>Cordonnier</surname><given-names>C.</given-names></name> <name><surname>Viswanathan</surname><given-names>A.</given-names></name> <name><surname>Al-Shahi Salman</surname><given-names>R.</given-names></name> <name><surname>Warach</surname><given-names>S.</given-names></name> <etal/></person-group>. (<year>2009</year>). <article-title>Cerebral microbleeds: a guide to detection and interpretation</article-title>. <source>Lancet Neurol.</source> <volume>8</volume>, <fpage>165</fpage>&#x2013;<lpage>174</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S1474-4422(09)70013-4</pub-id>, <pub-id pub-id-type="pmid">19161908</pub-id></mixed-citation></ref>
<ref id="ref11"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gregoire</surname><given-names>S. M.</given-names></name> <name><surname>Chaudhary</surname><given-names>U. J.</given-names></name> <name><surname>Brown</surname><given-names>M. M.</given-names></name> <name><surname>Yousry</surname><given-names>T. A.</given-names></name> <name><surname>Kallis</surname><given-names>C.</given-names></name> <name><surname>J&#x00E4;ger</surname><given-names>H. R.</given-names></name> <etal/></person-group>. (<year>2009</year>). <article-title>The microbleed anatomical rating scale (MARS): reliability of a tool to map brain microbleeds</article-title>. <source>Neurology</source> <volume>73</volume>, <fpage>1759</fpage>&#x2013;<lpage>1766</lpage>. doi: <pub-id pub-id-type="doi">10.1212/WNL.0b013e3181c34a7d</pub-id>, <pub-id pub-id-type="pmid">19933977</pub-id></mixed-citation></ref>
<ref id="ref12"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hampel</surname><given-names>H.</given-names></name> <name><surname>Elhage</surname><given-names>A.</given-names></name> <name><surname>Cho</surname><given-names>M.</given-names></name> <name><surname>Apostolova</surname><given-names>L. G.</given-names></name> <name><surname>Nicoll</surname><given-names>J. A. R.</given-names></name> <name><surname>Atri</surname><given-names>A.</given-names></name></person-group> (<year>2023</year>). <article-title>Amyloid-related imaging abnormalities (ARIA): radiological, biological and clinical characteristics</article-title>. <source>Brain</source> <volume>146</volume>, <fpage>4414</fpage>&#x2013;<lpage>4424</lpage>. doi: <pub-id pub-id-type="doi">10.1093/brain/awad188</pub-id>, <pub-id pub-id-type="pmid">37280110</pub-id></mixed-citation></ref>
<ref id="ref13"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jenkinson</surname><given-names>M.</given-names></name> <name><surname>Beckmann</surname><given-names>C. F.</given-names></name> <name><surname>Behrens</surname><given-names>T. E. J.</given-names></name> <name><surname>Woolrich</surname><given-names>M. W.</given-names></name> <name><surname>Smith</surname><given-names>S. M.</given-names></name></person-group> (<year>2012</year>). <article-title>FSL</article-title>. <source>NeuroImage</source> <volume>62</volume>, <fpage>782</fpage>&#x2013;<lpage>790</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2011.09.015</pub-id></mixed-citation></ref>
<ref id="ref14"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kuijf</surname><given-names>H. J.</given-names></name> <name><surname>Brundel</surname><given-names>M.</given-names></name> <name><surname>de Bresser</surname><given-names>J.</given-names></name> <name><surname>van Veluw</surname><given-names>S. J.</given-names></name> <name><surname>Heringa</surname><given-names>S. M.</given-names></name> <name><surname>Viergever</surname><given-names>M. A.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Semi-automated detection of cerebral microbleeds on 3.0 T MR images</article-title>. <source>PLoS One</source> <volume>8</volume>:<fpage>e66610</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0066610</pub-id>, <pub-id pub-id-type="pmid">23805246</pub-id></mixed-citation></ref>
<ref id="ref15"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname><given-names>T.</given-names></name> <name><surname>Zou</surname><given-names>Y.</given-names></name> <name><surname>Bai</surname><given-names>P.</given-names></name> <name><surname>Li</surname><given-names>S.</given-names></name> <name><surname>Wang</surname><given-names>H.</given-names></name> <name><surname>Chen</surname><given-names>X.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Detecting cerebral microbleeds via deep learning with features enhancement by reusing ground truth</article-title>. <source>Comput. Methods Prog. Biomed.</source> <volume>204</volume>:<fpage>106051</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cmpb.2021.106051</pub-id>, <pub-id pub-id-type="pmid">33831723</pub-id></mixed-citation></ref>
<ref id="ref16"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lovelock</surname><given-names>C. E.</given-names></name> <name><surname>Cordonnier</surname><given-names>C.</given-names></name> <name><surname>Naka</surname><given-names>H.</given-names></name> <name><surname>Al-Shahi Salman</surname><given-names>R.</given-names></name> <name><surname>Sudlow</surname><given-names>C. L. M.</given-names></name><collab id="coll1">Edinburgh Stroke Study Group</collab> <etal/></person-group>. (<year>2010</year>). <article-title>Antithrombotic drug use, cerebral microbleeds, and intracerebral hemorrhage: a systematic review of published and unpublished studies</article-title>. <source>Stroke</source> <volume>41</volume>, <fpage>1222</fpage>&#x2013;<lpage>1228</lpage>. Available online at: <ext-link xlink:href="https://www.ahajournals.org/doi/abs/10.1161/STROKEAHA.109.572594" ext-link-type="uri">https://www.ahajournals.org/doi/abs/10.1161/STROKEAHA.109.572594</ext-link></mixed-citation></ref>
<ref id="ref17"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Myung</surname><given-names>M. J.</given-names></name> <name><surname>Lee</surname><given-names>K. M.</given-names></name> <name><surname>Kim</surname><given-names>H.-G.</given-names></name> <name><surname>Oh</surname><given-names>J.</given-names></name> <name><surname>Lee</surname><given-names>J. Y.</given-names></name> <name><surname>Shin</surname><given-names>I.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Novel approaches to detection of cerebral microbleeds: single deep learning model to achieve a balanced performance</article-title>. <source>J. Stroke Cerebrovasc. Dis.</source> <volume>30</volume>:<fpage>105886</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jstrokecerebrovasdis.2021.105886</pub-id>, <pub-id pub-id-type="pmid">34175642</pub-id></mixed-citation></ref>
<ref id="ref18"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Selvaraju</surname><given-names>RR</given-names></name> <name><surname>Cogswell</surname><given-names>M</given-names></name> <name><surname>Das</surname><given-names>A</given-names></name> <name><surname>Vedantam</surname><given-names>R</given-names></name> <name><surname>Parikh</surname><given-names>D</given-names></name></person-group>. &#x201C;Grad-CAM: visual explanations from deep networks via gradient-based localization.&#x201D; In: <italic>2017 IEEE international conference on computer vision (ICCV)</italic>. IEEE; (<year>2017</year>). Available online at: <ext-link xlink:href="http://openaccess.thecvf.com/content_iccv_2017/html/Selvaraju_Grad-CAM_Visual_Explanations_ICCV_2017_paper.html" ext-link-type="uri">http://openaccess.thecvf.com/content_iccv_2017/html/Selvaraju_Grad-CAM_Visual_Explanations_ICCV_2017_paper.html</ext-link></mixed-citation></ref>
<ref id="ref19"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Son</surname><given-names>S.</given-names></name> <name><surname>Joo</surname><given-names>B.</given-names></name> <name><surname>Park</surname><given-names>M.</given-names></name> <name><surname>Suh</surname><given-names>S. H.</given-names></name> <name><surname>Oh</surname><given-names>H. S.</given-names></name> <name><surname>Kim</surname><given-names>J. W.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Development of RLK-Unet: a clinically favorable deep learning algorithm for brain metastasis detection and treatment response assessment</article-title>. <source>Front. Oncol.</source> <volume>13</volume>:<fpage>1273013</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fonc.2023.1273013</pub-id>, <pub-id pub-id-type="pmid">38288101</pub-id></mixed-citation></ref>
<ref id="ref20"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sudre</surname><given-names>C. H.</given-names></name> <name><surname>Van Wijnen</surname><given-names>K.</given-names></name> <name><surname>Dubost</surname><given-names>F.</given-names></name> <name><surname>Adams</surname><given-names>H.</given-names></name> <name><surname>Atkinson</surname><given-names>D.</given-names></name> <name><surname>Barkhof</surname><given-names>F.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Where is VALDO? Vascular lesions detection and segmentation challenge at MICCAI 2021</article-title>. <source>Med. Image Anal.</source> <volume>91</volume>:<fpage>103029</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.media.2023.103029</pub-id>, <pub-id pub-id-type="pmid">37988921</pub-id></mixed-citation></ref>
<ref id="ref21"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tsuchida</surname><given-names>A.</given-names></name> <name><surname>Goubet</surname><given-names>M.</given-names></name> <name><surname>Boutinaud</surname><given-names>P.</given-names></name> <name><surname>Astafeva</surname><given-names>I.</given-names></name> <name><surname>Nozais</surname><given-names>V.</given-names></name> <name><surname>Herv&#x00E9;</surname><given-names>P.-Y.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>SHIVA-CMB: a deep-learning-based robust cerebral microbleed segmentation tool trained on multi-source T2&#x002A;GRE- and susceptibility-weighted MRI</article-title>. <source>Sci. Rep.</source> <volume>14</volume>:<fpage>30901</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-024-81870-5</pub-id>, <pub-id pub-id-type="pmid">39730628</pub-id></mixed-citation></ref>
<ref id="ref22"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tustison</surname><given-names>N. J.</given-names></name> <name><surname>Avants</surname><given-names>B. B.</given-names></name> <name><surname>Cook</surname><given-names>P. A.</given-names></name> <name><surname>Zheng</surname><given-names>Y.</given-names></name> <name><surname>Egan</surname><given-names>A.</given-names></name> <name><surname>Yushkevich</surname><given-names>P. A.</given-names></name> <etal/></person-group>. (<year>2010</year>). <article-title>N4ITK: improved N3 bias correction</article-title>. <source>IEEE Trans. Med. Imaging</source> <volume>29</volume>, <fpage>1310</fpage>&#x2013;<lpage>1320</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TMI.2010.2046908</pub-id>, <pub-id pub-id-type="pmid">20378467</pub-id></mixed-citation></ref>
<ref id="ref23"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wei</surname><given-names>Z.</given-names></name> <name><surname>Chen</surname><given-names>X.</given-names></name> <name><surname>Huang</surname><given-names>J.</given-names></name> <name><surname>Wang</surname><given-names>Z.</given-names></name> <name><surname>Yao</surname><given-names>T.</given-names></name> <name><surname>Gao</surname><given-names>C.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Construction of a medical micro-object cascade network for automated segmentation of cerebral microbleeds in susceptibility weighted imaging</article-title>. <source>Front. Bioeng. Biotechnol.</source> <volume>10</volume>:<fpage>937314</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fbioe.2022.937314</pub-id>, <pub-id pub-id-type="pmid">35935490</pub-id></mixed-citation></ref>
<ref id="ref24"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Woo</surname><given-names>S.</given-names></name> <name><surname>Park</surname><given-names>J.</given-names></name> <name><surname>Lee</surname><given-names>J.-Y.</given-names></name> <name><surname>Kweon</surname><given-names>I. S.</given-names></name></person-group> (<year>2018</year>). &#x201C;<chapter-title>CBAM: convolutional block attention module</chapter-title>&#x201D; in <source>Computer vision &#x2013; ECCV 2018</source> (<publisher-loc>Cham</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name>), <fpage>3</fpage>&#x2013;<lpage>19</lpage>. Available online at: <ext-link xlink:href="http://openaccess.thecvf.com/content_ECCV_2018/html/Sanghyun_Woo_Convolutional_Block_Attention_ECCV_2018_paper.html" ext-link-type="uri">http://openaccess.thecvf.com/content_ECCV_2018/html/Sanghyun_Woo_Convolutional_Block_Attention_ECCV_2018_paper.html</ext-link></mixed-citation></ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0002">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/11470/overview">David C. Zhu</ext-link>, Albert Einstein College of Medicine, United States</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0003">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1289135/overview">Jiaen Liu</ext-link>, University of Texas Southwestern Medical Center, United States</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1550000/overview">Palash Ghosal</ext-link>, Sikkim Manipal University, India</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3351535/overview">Behnam Kazemivash</ext-link>, Albert Einstein College of Medicine, United States</p>
</fn>
</fn-group>
</back>
</article>