<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="review-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Oncol.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Oncology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Oncol.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2234-943X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fonc.2026.1759194</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Review</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Ultrasound-based artificial intelligence for breast lesion classification</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" equal-contrib="yes">
<name><surname>Ma</surname><given-names>Ting</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn003"><sup>&#x2020;</sup></xref>
<xref ref-type="author-notes" rid="fn004"><sup>&#x2021;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2355527/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name><surname>Wang</surname><given-names>Zhen</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn003"><sup>&#x2020;</sup></xref>
<xref ref-type="author-notes" rid="fn004"><sup>&#x2021;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2854839/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Dong</surname><given-names>Jian</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2849830/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Cheng</surname><given-names>Yuhang</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2827967/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Zhao</surname><given-names>Huan</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2926200/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Cui</surname><given-names>Xinwu</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/937886/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Department of Medical Ultrasound, Tongji Hospital, Tongji Medical College, Huazhong University of Science and Technology</institution>, <city>Wuhan</city>,&#xa0;<country country="CN">China</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Medical Ultrasound, The First Affiliated Hospital of Shihezi University</institution>, <city>Shihezi</city>, <state>Xinjiang</state>,&#xa0;<country country="CN">China</country></aff>
<author-notes>
<corresp id="c001"><label>*</label>Correspondence: Xinwu Cui, <email xlink:href="mailto:cuixinwu@live.cn">cuixinwu@live.cn</email></corresp>
<fn fn-type="equal" id="fn003">
<p>&#x2020;These authors have contributed equally to this work and share first authorship</p></fn>
<fn fn-type="other" id="fn004">
<p>&#x2021;ORCID: Ting Ma, <uri xlink:href="https://orcid.org/0009-0005-6814-5788">orcid.org/0009-0005-6814-5788</uri>; Zhen Wang, <uri xlink:href="https://orcid.org/0009-0002-8016-0030">orcid.org/0009-0002-8016-0030</uri></p></fn>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-26">
<day>26</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>16</volume>
<elocation-id>1759194</elocation-id>
<history>
<date date-type="received">
<day>02</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>10</day>
<month>02</month>
<year>2026</year>
</date>
<date date-type="rev-recd">
<day>31</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Ma, Wang, Dong, Cheng, Zhao and Cui.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Ma, Wang, Dong, Cheng, Zhao and Cui</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-26">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Breast cancer is the most prevalent cancer among women. Early and accurate screening is crucial for improving patient outcomes. Ultrasound is a valuable diagnostic tool, particularly for dense breasts, yet its efficacy can be limited by operator dependency and interpretive variability. Artificial intelligence (AI) has shown significant potential to enhance the accuracy and efficiency of breast ultrasound. However, translating AI from research to clinical practice remains challenging due to several persistent gaps: the lack of robust clinical validation for generative AI in image enhancement; insufficient focus on AI for diagnosing non-mass lesions, which constitute a notable proportion of malignancies; and limited multi-center effectiveness data for commercial computer-aided diagnosis systems. This narrative review synthesizes recent advancements in AI for breast ultrasound and provides a critical, multifaceted analysis that integrates technological evolution, clinical-translation challenges, and implementation frameworks. Importantly, it highlights pervasive methodological limitations, such as small sample sizes, retrospective single-center designs, and inadequate external validation, that often lead to overestimation of real-world AI performance. By offering both actionable insights and a cautionary perspective, this review aims to guide the rigorous, evidence-based translation of AI into clinically viable tools.</p>
</abstract>
<kwd-group>
<kwd>artificial intelligence</kwd>
<kwd>breast lesion</kwd>
<kwd>convolutional neural networks</kwd>
<kwd>deep learning</kwd>
<kwd>ultrasound</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by National Natural Science Foundation of China (82460353), and Bingtuan Science and Technology Program (2024ZD056).</funding-statement>
</funding-group>
<counts>
<fig-count count="3"/>
<table-count count="6"/>
<equation-count count="0"/>
<ref-count count="100"/>
<page-count count="20"/>
<word-count count="12924"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Cancer Imaging and Image-directed Interventions</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>Breast cancer (BC) is a heterogeneous disease caused by the complex interaction of multiple factors such as genetics, lifestyle, and hormones (<xref ref-type="bibr" rid="B1">1</xref>). Global cancer-related reports in 2022 documented 2,308,897 new cases of BC in females, ranking behind lung cancer as the second most prevalent cancer worldwide (11.6%) (<xref ref-type="bibr" rid="B2">2</xref>). Most women with early-stage BC may have the opportunity to opt for breast-conserving surgery, a less invasive option that offers a better prognosis (<xref ref-type="bibr" rid="B3">3</xref>). Advanced BC may metastasize to distant tissues and organs such as the lungs, spine, and liver, thereby limiting patients&#x2019; access to optimal treatment and ultimately leading to a grim prognosis. The median survival of BC patients with advanced cancer or distant metastases is only about three years, with a five-year survival rate of approximately 26% alongside a severely diminished quality of life (<xref ref-type="bibr" rid="B4">4</xref>, <xref ref-type="bibr" rid="B5">5</xref>). Despite a recent decline in mortality rates, BC remains a significant cause of death among women globally, with an estimated 665,684 deaths annually. It accounts for 6.9% of all cancer-related deaths in women, which seriously threatens women&#x2019;s health and global public health security (<xref ref-type="bibr" rid="B2">2</xref>, <xref ref-type="bibr" rid="B6">6</xref>). Therefore, early and accurate screening to assess breast lesions, understand the trend of BC changes, and guide the early treatment of BC patients is the key to improving patient outcomes and prognosis.</p>
<p>The diagnosis of breast lesions involves a comprehensive evaluation of clinical symptoms, physical findings, tumor marker tests, radiological features, and pathology tests. Currently, mammography and ultrasound are widely used as the primary imaging tools for breast lesion screening. Ultrasound and screening mammograms exhibit similar sensitivity, specificity, cancer detection rates, and biopsy rates. However, ultrasound screening has a higher rate of detecting invasive cancers (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B8">8</xref>). The high contrast resolution of ultrasound on soft tissue makes it independent of gland type and avoids the requirement for breast compression, making it more suitable for early screening in relatively young women with dense glandular types (<xref ref-type="bibr" rid="B9">9</xref>, <xref ref-type="bibr" rid="B10">10</xref>). Recently, advancements in automated breast ultrasound systems have addressed the operator-dependent limitations of conventional handheld ultrasound devices by providing 3D formatted images, which improve the diagnostic accuracy (<xref ref-type="bibr" rid="B11">11</xref>). In addition, ultrasound has demonstrated superior diagnostic capabilities compared to other imaging methods in detecting axillary lymph node metastases in patients with BC. Its benefits such as the absence of ionizing radiation, the ability to guide cytological and histological diagnostic studies, and the support of surgical interventions, have contributed to the growing use of ultrasound in the screening and descriptive breast lesion classification.</p>
<p>The classification of breast lesions as benign or malignant is a crucial step following lesion screening and an essential component of the clinical diagnosis and treatment of BC. Currently, the potential role of artificial intelligence (AI) algorithms in ultrasound has been widely explored (<xref ref-type="bibr" rid="B12">12</xref>). AI, a branch of data science, can extract numerous image features that are invisible to the naked eye based on the radiomics of ultrasound (<xref ref-type="bibr" rid="B13">13</xref>). The AI system provides predictive models trained on high-throughput quantitative features extracted from radiological data through image processing, segmentation, and feature extraction. Automated breast ultrasound systems allow for the standardized acquisition of comprehensive breast images, mitigating operator-dependent variability and reducing interference from extramammary factors that can compromise image quality (<xref ref-type="bibr" rid="B14">14</xref>). A complete standardized AI-assisted breast ultrasound procedure takes about two minutes, reducing interpretation and reporting time by approximately 40%, maximizing the efficiency of the sonographer&#x2019;s task (<xref ref-type="bibr" rid="B15">15</xref>, <xref ref-type="bibr" rid="B16">16</xref>). AI processes large volumes of image data from various modalities to provide accurate malignancy risk assessments of the breast lesion, which improves the diagnostic accuracy of less-experienced sonographers and frees up physician resources. Therefore, in-depth research on AI in breast ultrasound is essential. Despite the valuable contributions of prior reviews, several persistent gaps impede the translation of AI from research to clinical practice. 
Specifically, three underexplored yet critical areas include: (I) the clinical validation gap for generative AI (GenAI), particularly in ultrasound image enhancement and microbubble localization; (II) the diagnostic challenge of non-mass lesions, which represent 15&#x2013;20% of breast malignancies but are often overlooked in AI studies; and (III) the lack of multi-center performance data for commercial computer-aided detection and diagnosis (CAD) systems, limiting their generalizability and clinical trust. This review is positioned to address these gaps by not only synthesizing technical advancements but also systematically examining translational pathways and implementation barriers.</p>
<p>While recent reviews have advanced breast cancer diagnostics, critical knowledge and translational gaps persist specifically in the application of AI to breast ultrasound. Systematic analyses have meticulously evaluated the promise of emerging imaging modalities like hyperspectral imaging for CAD, and broader assessments have critiqued the translational pathway of multiple novel techniques from innovation to clinical application (<xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B18">18</xref>). However, these studies either focus on alternative imaging technologies or provide a high-level overview across modalities, leaving a focused, in-depth synthesis of AI&#x2019;s evolution and integration within the established, first-line screening tool, breast ultrasound, less comprehensively addressed. Furthermore, discussions on socioeconomic barriers to prevention highlight crucial contextual challenges in healthcare delivery, underscoring the need for AI solutions that are not only technologically robust but also adaptable to diverse clinical and resource settings (<xref ref-type="bibr" rid="B19">19</xref>).</p>
<p>Consequently, this narrative review aims to fill these voids by concentrating on the intersection of artificial intelligence and breast ultrasound imaging. We posit that despite ultrasound&#x2019;s primary role in screening, particularly for dense breasts, a focused analysis of how AI models are evolving to interpret its imagery, overcome its operator-dependency, and integrate into clinical workflows is urgently needed. Key underexplored areas include the clinical validation of generative AI for image enhancement, the diagnostic handling of challenging non-mass lesions often missed in AI training sets, and the real-world, multi-center performance data of commercial AI-CAD systems&#x2014;gaps that are pivotal for translation but not centrally addressed in prior syntheses.</p>
<p>To bridge these identified gaps, this review not only synthesizes state-of-the-art AI applications but also provides a critical and multifaceted analysis. This analysis scrutinizes AI model evolution, examines the methodological strengths and weaknesses of clinical translation studies, and dissects the implementation barriers of commercial frameworks in light of often-overlooked factors like dataset bias and generalizability deficits. We outline the current state of AI in breast ultrasound, with a particular focus on its application in lesion classification and malignancy grading. Beyond summarizing existing research, we critically examine translational challenges, explainable AI frameworks, and the integration of commercial CAD systems into clinical workflows. By synthesizing technical advancements with a critical appraisal of real-world diagnostic challenges and the methodological constraints of existing studies, this review provides a balanced perspective that bridges the gap between AI research and the requirements for robust, generalizable clinical practice. It offers actionable insights not only on technological trends but also on the translational pathways and implementation hurdles. Critically, it distinguishes between the promising efficacy demonstrated in research settings, often reliant on single-center, retrospective data, and the robust evidence required for clinical viability. This distinction provides a more realistic roadmap for researchers and clinicians, highlighting that the development and deployment of reliable AI solutions in breast ultrasound necessitate a focus on prospective validation and generalizability, alongside their practical utility.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Methodology and scope of this review</title>
<p>This work is structured as a critical narrative review. Its primary objective is to identify, synthesize, and critically analyze the current state of artificial intelligence in breast ultrasound, with a focused aim of bridging the translational gaps between research prototypes, commercial systems, and clinical practice. Unlike a systematic review with a narrow PICO question, this review adopts a broader perspective to provide a comprehensive, interdisciplinary analysis of technological evolution, clinical applications, and implementation challenges.</p>
<p>To construct this narrative, we undertook a structured but non-exhaustive literature search to identify foundational and impactful works. Electronic databases, including PubMed, IEEE Xplore, and Google Scholar, were queried using combinations of keywords such as &#x201c;artificial intelligence&#x201d;, &#x201c;deep learning&#x201d;, &#x201c;convolutional neural network&#x201d;, &#x201c;breast ultrasound&#x201d;, &#x201c;breast lesion classification&#x201d;, &#x201c;computer-aided diagnosis&#x201d;, &#x201c;radiomics&#x201d;, and &#x201c;generative AI&#x201d;. Given the review&#x2019;s translational focus, we prioritized peer-reviewed original research, high-impact review articles, clinical validation studies, and key reports on commercial CAD systems. Special emphasis was placed on identifying literature that directly informed the three critical gaps outlined in the introduction: (1) clinical validation of generative AI, (2) diagnosis of non-mass lesions, and (3) multi-center performance of commercial systems. No formal quality scoring tool was applied, but studies were critically appraised based on sample size, study design, validation method, and clarity of reported metrics.</p>
<p>The identified literature was analyzed thematically rather than quantitatively. The synthesis is organized to first establish the technological foundations, then examine applications and clinical decision-making, evaluate commercial systems and translational readiness, and finally propose a future roadmap. This structure is designed to facilitate a critical examination of the field&#x2019;s progress and persistent bottlenecks from multiple angles.</p>
</sec>
<sec id="s3">
<label>3</label>
<title>Overview of artificial intelligence technologies</title>
<sec id="s3_1">
<label>3.1</label>
<title>Artificial intelligence</title>
<p>The concept of AI was first introduced to the public by John McCarthy in 1956 during the Dartmouth Conference. AI aims to develop systems that assist with tasks requiring human expertise. In medical imaging, AI excels at analyzing complex image data to identify patterns and support diagnostic decisions, offering tools like radiomics for quantitative feature analysis (<xref ref-type="bibr" rid="B20">20</xref>).</p>
<p>AI infrastructure layer supported by machine learning (ML) core algorithms and AI infrastructure frameworks supports and realizes AI applications by learning the internal laws of data and training models (<xref ref-type="bibr" rid="B21">21</xref>). For medical image analysis, ML and its advanced subset, deep learning (DL), are the most relevant AI paradigms. Convolutional neural networks (CNNs), a specialized DL architecture, are particularly pivotal for ultrasound image interpretation. Their key characteristics are summarized in <xref ref-type="table" rid="T1"><bold>Table&#xa0;1</bold></xref>.</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>The key differences between machine learning, deep learning, and convolutional neural network.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="center">AI technology</th>
<th valign="top" align="center">Brief definition</th>
<th valign="top" align="center">Core technology</th>
<th valign="top" align="center">Data requirements</th>
<th valign="top" align="center">Representative model</th>
<th valign="top" align="center">Application of breast ultrasound</th>
<th valign="top" align="center">Advantages and limitations</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">ML</td>
<td valign="top" align="left">An algorithm framework for learning patterns from data</td>
<td valign="top" align="left">Supervised Learning<break/>Unsupervised Learning<break/>Feature Engineering</td>
<td valign="top" align="left">Medium-scale annotation data</td>
<td valign="top" align="left">RF<break/>SVM</td>
<td valign="top" align="left">Classification of lesion echo features</td>
<td valign="top" align="left">Advantages: Strong interpretability<break/>Limitations: Relying on manual feature extraction makes it difficult to handle complex patterns</td>
</tr>
<tr>
<td valign="top" align="left">DL</td>
<td valign="top" align="left">AI branch driven by multi-layer neural networks</td>
<td valign="top" align="left">Back-propagation<break/>Automatic feature extraction</td>
<td valign="top" align="left">Large-scale annotated data</td>
<td valign="top" align="left">ResNet<break/>DenseNet</td>
<td valign="top" align="left">Dynamic analysis of ultrasound video</td>
<td valign="top" align="left">Advantages: end-to-end learning<break/>Limitations: High computational resource requirements and poor model interpretability</td>
</tr>
<tr>
<td valign="top" align="left">CNN</td>
<td valign="top" align="left">DL architecture is designed specifically for images, preserving spatial relationships</td>
<td valign="top" align="left">Convolutional kernels<break/>Pooling operations</td>
<td valign="top" align="left">Large-scale image data</td>
<td valign="top" align="left">AlexNet<break/>U-Net</td>
<td valign="top" align="left">Microcalcification detection,<break/>Classification of non-mass lesions</td>
<td valign="top" align="left">Advantages: High efficiency in image feature extraction<break/>Limitations: Sensitive to ultrasound speckle noise, requiring standardized image input</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>ML, Machine Learning; DL, Deep Learning; CNN, Convolutional Neural Network; RF, random forest; SVM, Support Vector Machine; ResNet, Residual Network.</p></fn>
</table-wrap-foot>
</table-wrap>
<p>AI opens unprecedented possibilities for medical research, particularly in complex data analysis, pattern discovery, and predictive modeling. AI big data analytics are forming an emerging field through effective integration with medical imaging, especially noninvasive advanced imaging analysis, known as radiomics. AI analyzes lesion features extracted from large volumes of digital quantitative information from radiological imaging to provide predictive models. AI algorithms are trained by extracting and analyzing markers, such as pathology test results and genetic microcoding, from patient diagnosis or prognosis. The AI system processes image data from various imaging modalities to obtain the output information and continually undergoes self-correction and re-learning to make diagnostic and treatment decisions (<xref ref-type="bibr" rid="B22">22</xref>). Imaging physicians, after scrutinizing numerous image data for diagnosis, may suffer from fatigue, potentially resulting in missed diagnoses or misdiagnoses. AI has shown significant potential in revolutionizing medical diagnostics, offering high accuracy and efficiency in identifying diseases such as cancer at early stages and providing personalized treatment plans by analyzing vast amounts of medical data. AI can mimic the work of humans in medical image processing over long periods and make correct diagnoses, improving the efficiency of radiologists by reducing interpretation time (<xref ref-type="bibr" rid="B23">23</xref>). AI-based radiomics is increasingly utilized in diagnostic ultrasound. The pre-training of AI models on extensive image datasets is a critical step in leveraging AI for radiomics, particularly in diagnostic ultrasound, as demonstrated by recent advancements in breast ultrasound and liver fibrosis staging. The pre-training of models extracts deep features from rich ultrasound images, which in turn can help identify new biomarkers or develop new criteria for diagnosing a specific disease. 
These trained models can assist clinicians in interpreting ultrasound images and automatically provide diagnostic results or recommendations, which not only saves time and improves access to healthcare but also optimizes ultrasound diagnosis.</p>
<p>In 2022, generative AI (GenAI) emerged as a transformative branch of AI, capable of creating new data instances that resemble the training distribution (<xref ref-type="bibr" rid="B24">24</xref>). Within breast ultrasound, GenAI holds immediate promise for addressing two pivotal challenges: image quality enhancement and data scarcity (<xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B26">26</xref>). For image enhancement, GenAI models, particularly those based on Generative Adversarial Networks (GANs) or diffusion models, can be employed to reduce characteristic speckle noise, improve image resolution, and synthesize one imaging modality from another. These applications aim to improve lesion conspicuity and feature extraction, directly impacting diagnostic confidence. For data augmentation, GenAI can generate high-quality, annotated synthetic breast ultrasound images. This is crucial for expanding training datasets, balancing class distributions, and simulating rare or challenging presentations like non-mass lesions, thereby mitigating the data-hungry nature of deep learning models.</p>
<p>Currently, GenAI is increasingly being used in healthcare to inform clinical service functions. GenAI can improve diagnostic accuracy through knowledge acquisition for screening and diagnosing diseases. While GenAI is still in the process of being fully realized, the healthcare industry has already seen significant strides in its application, with 62% of healthcare and life science executives implementing GenAI solutions and 74% experiencing investment returns in at least one use case. Continued research and development are essential to deepen the integration of GenAI in healthcare, particularly in areas such as automated services, to further revolutionize drug development and personalized patient care.</p>
<p>However, a pronounced chasm separates this technical potential from validated clinical utility. The majority of GenAI applications in breast ultrasound are proof-of-concept demonstrations reliant on small, often idealized datasets, remaining far from clinical readiness. The most critical evidence gap, as highlighted earlier, is the near absence of robust, multi-center clinical trials. Such trials are essential to determine if GenAI-driven enhancements yield measurable, reproducible improvements in diagnostic accuracy and workflow efficiency across diverse clinical environments, or if reported gains are specific to constrained experimental settings.</p>
<p>The evolutionary path forward for AI in breast ultrasound points toward systems with enhanced multimodal integration and clinical context-aware decision support, moving beyond single-modality, task-specific classifiers. Rather than pursuing the broad and distant goal of Artificial General Intelligence (AGI), the near-term focus is on developing specialized AI systems that can emulate a radiologist&#x2019;s integrative reasoning process. Such systems aim to automatically fuse and interpret diverse data streams available in a clinical setting. This includes correlating features across multiple ultrasound modalities from the same exam and incorporating relevant clinical context from electronic health records.</p>
<p>The technical foundation for this next stage lies in advancing multimodal learning architectures, cross-modal representation learning, and explainable AI (XAI) frameworks that make the integrated decision-making process transparent. By doing so, AI has the potential to evolve from an image analysis tool into a comprehensive clinical decision-support partner. It could provide a unified risk assessment, suggest differential diagnoses, and flag inconsistencies, thereby assisting in complex case evaluation and management planning.</p>
<p>This progression from an image analysis tool to a clinical decision-support system underscores the translational goal of AI: to augment and standardize the radiologist&#x2019;s cognitive process, thereby enhancing the consistency and accuracy of diagnostic outcomes across diverse clinical settings and levels of expertise. Achieving this requires concerted efforts in creating large, curated, multimodal datasets and developing robust validation frameworks to ensure these advanced systems are safe, effective, and trustworthy in real-world clinical workflows.</p>
</sec>
<sec id="s3_2">
<label>3.2</label>
<title>Machine learning</title>
<p>ML is an emerging field that combines computer science and statistics. Unlike classical AI programming, which executes algorithms to produce results, ML enables computers to learn from input data without being explicitly programmed to use the dataset and associated outputs to generate algorithms that can describe the relationship between the two (<xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B28">28</xref>).</p>
<sec id="s3_2_1">
<label>3.2.1</label>
<title>Traditional machine learning tasks</title>
<p>Supervised learning, as the most dominant ML approach, focuses on inferring algorithms from data features and target outputs, back-propagating from target outputs to predict the accuracy of the algorithms. The algorithm optimization process involves assessing model accuracy on the new dataset, which subsequently guides necessary adjustments to the algorithm. Supervised Learning requires a large amount of well-labeled data for classification to train the algorithm (<xref ref-type="bibr" rid="B29">29</xref>).</p>
<p>Unsupervised learning consists of unlabeled data presented to an algorithm to find implicit patterns. Unsupervised learning techniques, such as clustering, correlation, and anomaly detection, focus on detecting centralized patterns of correlation between data and categorizing the individual data in the dataset (<xref ref-type="bibr" rid="B30">30</xref>). Common algorithms in this field include clustering algorithms (e.g., k-means), principal component analysis, and independent component analysis.</p>
<p>Semi-supervised learning, particularly in medical image segmentation, is widely adopted as it leverages both labeled and unlabeled datasets to enhance the performance of models, often outperforming purely unsupervised approaches. Due to the time-consuming and laborious nature of labeling large datasets, a practical approach is to label a small subset for training, enabling the model to classify the remaining unlabeled images. This labeled dataset subsequently serves to train a new, functional model, and the labeled classification of large data and iterative refinement of the model is performed over time (<xref ref-type="bibr" rid="B31">31</xref>, <xref ref-type="bibr" rid="B32">32</xref>).</p>
</sec>
<sec id="s3_2_2">
<label>3.2.2</label>
<title>Common algorithm models in machine learning</title>
<p>ML algorithms represented by linear regression, logistic regression (LR), decision tree (DT), random forest (RF), and support vector machine have been proficiently applied in medicine.</p>
<p>Linear regression is based on the assumption of a linear relationship and minimizes the squared difference between predicted and actual values by finding the most suitable straight line for the data (see <xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1A</bold></xref>). Linear functions are simple, intuitive, and fast, but they poorly fit nonlinear data and are sensitive to outliers.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>Schematic diagram of common algorithm models in traditional machine learning. <bold>(A)</bold> Comparison of linear regression and logistic regression, illustrating their decision boundaries; <bold>(B)</bold> Schematic of the Random Forest structure, demonstrating parallel tree processing; <bold>(C)</bold> Decision tree flowchart with classification branches and resulting data separation in feature space.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-16-1759194-g001.tif">
<alt-text content-type="machine-generated">Panel A shows a line graph comparing linear regression, depicted as a straight brown line, and logistic regression, shown as a green S-shaped curve, with horizontal dashed and solid reference lines. Panel B features a diagram of multiple tree structures processing initial data in parallel to generate multiple outputs, which combine into an overall output. Panel C includes a decision tree flowchart illustrating nodes splitting data into subsets based on yes or no branches, and a scatter plot demonstrating a red boundary, created by decision tree logic, separating two categories labeled as category one and category two.</alt-text>
</graphic></fig>
<p>LR is a statistical model for binary classification problems, mainly used to find the probabilistic relationship between data features of the input variables and the target outcome (see <xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1A</bold></xref>). The LR model can be flexibly implemented to analyze the effect of binomial or polynomial features on the target output. It is simple and computationally fast with good interpretability, but only applicable to linearly separable data and needs to be extended for multiclass problems.</p>
<p>DT accomplishes the dual task of classification and regression. It learns mapping relationships from data features to outputs by DT using a binary-like recursive partitioning method. DT splits the dataset layer by layer based on specific features to form a series of rules that combine to resemble a tree (see <xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1C</bold></xref>).</p>
<p>RF belongs to the category of integrated learning, which performs model operations by generating multiple DT (see <xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1B</bold></xref>). Each DT is created by subsampling features, and the majority of the results calculated within it are used as the final class prediction of the model. This approach enhances the accuracy and stability of the predictions. But it does not have the ability to handle difficult samples.</p>
</sec>
<sec id="s3_2_3">
<label>3.2.3</label>
<title>Achievements and development of traditional machine learning</title>
<p>ML analyzes and processes information by extracting lesion features and provides the final algorithm to assist radiologists in detecting and analyzing lesions. Traditional supervised learning techniques such as LR and RF have made significant progress in ultrasound image analysis. Their applications mainly focus on the following aspects: (I) image feature extraction: automatically extracting key features in ultrasound images, such as tumor composition, echo, edge, shape, and texture, etc., by ML algorithms; (II) image classification and diagnosis: using extracted lesion features to assist doctors in disease diagnosis; (III) radiomics: combining ML techniques to extract large amounts of high-throughput features for disease prediction and classification.</p>
<p>Conventional ML was developed for its accuracy and reliability in ultrasound image analysis. ML, especially RF algorithmic models, can automatically extract features from images, reducing the reliance on expert experience and making ultrasound image analysis more objective and standardized. A study of 895 ultrasound images found that the ultrasound-based RF algorithm performed best compared to the remaining algorithms in classification (ACC: 0.90, AUCPR: 0.90, F1 score: 0.83) (<xref ref-type="bibr" rid="B33">33</xref>). The ML model demonstrated excellent stability and accuracy with its powerful feature selection capability and classification performance.</p>
</sec>
<sec id="s3_2_4">
<label>3.2.4</label>
<title>Limitations of traditional machine learning</title>
<p>Nonetheless, research into ML algorithms at this juncture encounters obstacles like limited dataset size and intricate algorithm implementation. The continuous development of ultrasound imaging technology has opened up a new direction for improving imaging resolution, but further research and optimization are needed to overcome the existing challenges.</p>
</sec>
</sec>
<sec id="s3_3">
<label>3.3</label>
<title>Deep learning</title>
<p>DL learns features and patterns from data by constructing neural network models with multiple layers. It can autonomously &#x201c;learn&#x201d; from erroneous algorithmic outputs without human intervention, enhancing algorithmic stability and accuracy (<xref ref-type="bibr" rid="B34">34</xref>, <xref ref-type="bibr" rid="B35">35</xref>). Compared to traditional ML methods, DL offers greater feature extraction capabilities and higher model complexity, making it capable of handling large-scale data and complex tasks.</p>
<sec id="s3_3_1">
<label>3.3.1</label>
<title>Common neural network algorithm models for deep learning</title>
<p>DL-related algorithmic models mainly include Deep neural network (DNN), Recurrent neural network (RNN), Generative adversarial networks (GAN), and Convolutional neural network (CNN).</p>
<p>DNN is one of the most basic models of DL and usually includes an input layer, a hidden layer, and an output layer. The hidden layer can have multiple layers, each containing multiple neurons (<xref ref-type="bibr" rid="B36">36</xref>).</p>
<p>The CNN calculates error through a loss function as the algorithm runs, applying a backpropagation algorithm to instruct the machine on how to change its internal parameters and continually updating the connection weights to adjust them to better fit the relationship between the input data and the interested outputs (<xref ref-type="bibr" rid="B37">37</xref>). The algorithms provide positive feedback to reinforce the desired output. The relationship between input features and outputs is learned through layers of incremental computation and optimization, gradually improving the ability to process the input data and the accuracy of the outputs (<xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B39">39</xref>).</p>
<p>RNN takes the input of the current moment and the output of the previous moment simultaneously as the overall input of the current moment, utilizing a recurrent structure and introducing a temporal dimension. RNN automatically learns temporal dependencies in sequential data, efficiently processing sequential inputs of data like language, voice, text, and time series (<xref ref-type="bibr" rid="B37">37</xref>, <xref ref-type="bibr" rid="B40">40</xref>, <xref ref-type="bibr" rid="B41">41</xref>). Additionally, cyclic highway networks have further developed long- and short-term memory architectures and have solved the gradient problem of traditional RNN to better capture long-term dependencies in sequences.</p>
<p>As the name suggests, GAN is a DL model consisting of two neural networks, the generator and the discriminator, competing and cooperating, which belongs to the category of unsupervised learning. In radiology, GAN can synthesize realistic medical images (<xref ref-type="bibr" rid="B42">42</xref>).</p>
</sec>
<sec id="s3_3_2">
<label>3.3.2</label>
<title>Achievements and development in deep learning</title>
<p>DL has been developed significantly in medical imaging, where DL techniques can directly encode mappings. A meta-analysis evaluating the diagnostic accuracy of DL algorithms found that they have high diagnostic accuracy for a wide range of common diseases in terms of X-ray, ultrasound, CT, and MRI. Its application in ultrasound is also gradually transforming traditional ultrasound diagnostic methods (<xref ref-type="bibr" rid="B26">26</xref>, <xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B44">44</xref>).</p>
<p>Ultrasound images can be corrupted by scattering noise, hindering disease diagnosis and treatment progression. DL techniques hold the potential to enhance the clarity of ultrasound images and diminish noise levels. Luijten B. et&#xa0;al. used adaptive signal processing algorithms to constrain DNN, which can efficiently learn and perform fast, high-quality ultrasound beam formation using less training data (<xref ref-type="bibr" rid="B45">45</xref>). Liu et&#xa0;al. developed a CycleGAN model based on bi-directional universal mapping that enables style migration between the noisy data domain with scattering and the noise-free data domain, and demonstrated the superiority of the model in evaluating quantitative scattering signals as well as noise reduction and detail preservation, thus improving the quality of the ultrasound images (<xref ref-type="bibr" rid="B46">46</xref>). Sudharson S. et&#xa0;al. developed an integrated classification model with high accuracy (ACC: 0.95, 95% CI: 0.94 - 0.96) on ultrasound images with noisy speckles (<xref ref-type="bibr" rid="B47">47</xref>, <xref ref-type="bibr" rid="B48">48</xref>).</p>
<p>In breast ultrasound imaging, DL technology has proven to be a powerful, efficient, and highly accurate tool that can save examination time, improve lesion detection, and potentially compensate for physician experience deficits. A single-center retrospective study with 637 breast ultrasound images found that the DL model trained on small samples had a diagnostic AUC of 0.84 for breast lesions (SEN: 0.84, SPE: 0.80, PPV: 0.32, NPV: 0.97). The performance was comparable to the diagnostic results of radiologists and better than that of trained medical students. AI-assisted classification and diagnosis of breast disease may improve diagnostic accuracy for novice physicians (<xref ref-type="bibr" rid="B49">49</xref>).</p>
<p>DL has shown remarkable results in color Doppler imaging, shear-wave elastography (SWE), and ultrasonography. In breast lesion classification, color Doppler neural network models can achieve highly consistent interpretation results with experienced radiologists (AUC: 0.98 vs 0.95) and potentially automate routine characterization (<xref ref-type="bibr" rid="B50">50</xref>). A GAN-based microbubble localization model can detect microbubbles with an accuracy of 0.98, enabling high-precision localization of microbubbles in ultrasound contrast agents. The discovery of this model is crucial for the development of generalized solutions for different imaging conditions and types of biological tissues (<xref ref-type="bibr" rid="B51">51</xref>). Fei et&#xa0;al. investigated the synthesis of elasticity images via a multi-scale elasticity image synthesis network, and diagnostic tests demonstrated that the classification performance of the synthesized elasticity images was similar to that of the diagnostic performance of real elasticity analysis of semi-quantitative data (<xref ref-type="bibr" rid="B52">52</xref>).</p>
<p>Beyond basic screening and classification, DL models can determine whether the postoperative lesion staging of ductal carcinoma <italic>in situ</italic> diagnosed by preoperative hollow-core needle biopsy will be upgraded or not. Qian et&#xa0;al. retrospectively analyzed 360 images of confirmed ductal carcinoma <italic>in situ</italic> using a DL model applied to ultrasound images. The AUC of four DL models ranged from 0.72 to 0.80, with ResNet and Inception showing a more accurate diagnostic value (<xref ref-type="bibr" rid="B53">53</xref>). The ultrasound-based DNN prediction model can effectively predict patients with ductal carcinoma <italic>in situ</italic> who may be classified as upgraded after surgery, guiding the clinic to intervene as early as possible and make more accurate decisions.</p>
</sec>
<sec id="s3_3_3">
<label>3.3.3</label>
<title>Limitations of deep learning</title>
<p>With the ongoing technology development, the research and application of DL in medical imaging are moving in a deeper and wider direction. A fundamental constraint is the reliance on limited, high-quality labeled datasets, which not only restricts model training but, more critically, severely compromises generalizability. This issue of generalizability is fundamentally a problem of domain shift. AI models trained on data from one institution often experience significant performance degradation when applied to data from another, due to differences in ultrasound machine vendors, imaging protocols, operator techniques, and patient population characteristics. This equipment and acquisition heterogeneity represents a major, often under-appreciated, barrier to clinical deployment. Merely expanding the dataset size through multi-center collaboration is insufficient unless it explicitly addresses and quantifies this variability. To address this, expanding datasets through multi-center collaboration is essential, and this effort should be guided by a push towards standardized imaging protocols and annotation criteria to maximize data utility and minimize confounding technical heterogeneity.</p>
</sec>
</sec>
<sec id="s3_4">
<label>3.4</label>
<title>Convolutional neural networks</title>
<p>In image recognition tasks, each input to an artificial neural network corresponds to a pixel, without considering the connectivity between nodes in the layer, potentially losing the spatial context of image features in practical applications (<xref ref-type="bibr" rid="B54">54</xref>). CNN, a key subset of DL, preserves the spatial relationships between image pixels through the combinations of input, convolutional, pooling, fully connected, and output layers to automatically learn features and patterns in images (see <xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2</bold></xref>) (<xref ref-type="bibr" rid="B32">32</xref>).</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Convolutional neural network operation mode diagram.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-16-1759194-g002.tif">
<alt-text content-type="machine-generated">Diagram illustrating a convolutional neural network architecture for image classification, beginning with an input ultrasound image, followed by multiple convolutional and pooling layers, leading to fully connected layers, and culminating in an output layer.</alt-text>
</graphic></fig>
<sec id="s3_4_1">
<label>3.4.1</label>
<title>Convolutional neural network operations</title>
<p>Convolutional operations are popular in edge detection, sharpening, and blurring tasks (<xref ref-type="bibr" rid="B55">55</xref>). CNN feeds the information from the image to some specific nodes in the next layer of nodes through convolutional filters, extracting various features of the input image at different levels, thus preserving the extraction of information from the feature space and generating the feature maps. The pooling layer reduces overfitting and decreases the in-plane dimensionality of the feature mapping. By sampling the output of the convolutional layer at each stage to decrease the resolution of the feature map, the model&#x2019;s parameters are reduced while preserving the fundamental characteristics of the source image (<xref ref-type="bibr" rid="B28">28</xref>). As the new matrix feature map is continuously presented and the pooling layer is run, the extracted features become more abstract.</p>
<p>The fully connected layer maps the features extracted by the convolutional and pooling layers to the derived final output of the model. The final feature image is compressed from its matrix representation and fed into a feed-forward neural network, which classifies the image based on the extracted feature information such as lesion texture and edges.</p>
</sec>
<sec id="s3_4_2">
<label>3.4.2</label>
<title>Strengths of convolutional neural networks</title>
<p>In a regular neural network, all neurons in each layer are randomly connected to all neurons in the subsequent layer. In practical CNN applications, obtaining high-quality, large-scale labeled data for training is often challenging due to difficulties such as ensuring data diversity, maintaining data quality, and managing high annotation costs. How to improve the model performance with limited data and parameters is an urgent challenge. However, neurons in each layer of a CNN do not form a one-to-one connection relationship with all neurons in the next layer, but rather feed the information of the image to a small portion of specific neuron nodes in the nodes of the next layer through a convolutional filter kernel (<xref ref-type="bibr" rid="B56">56</xref>). Only this small subset of specific neuron nodes needs to be trained. This drastically reduces the number of parameters that need to be learned, making the models more efficient to train and less prone to overfitting, especially with limited medical image data (<xref ref-type="bibr" rid="B29">29</xref>).</p>
<p>CNN enables fine-tuning algorithmic models that have been trained in other projects for new tasks and new datasets to rapidly reach model maturity, commonly known as &#x201c;transfer learning&#x201d;. Numerous studies have developed new models based on pre-trained CNN models, such as ImageNet, AlexNet, GoogleNet, VGGNet, DenseNet, and ResNet (<xref ref-type="bibr" rid="B57">57</xref>, <xref ref-type="bibr" rid="B58">58</xref>). Transfer learning involves leveraging a pre-trained model as a feature extractor, continually extracting features such as edges and curves for object detection and image classification and then quickly training with the new task dataset. The fully connected layers are substituted by the new set, generating the target output for the new task. Transfer learning can reduce training parameters and decrease the data requirements for developing CNN models by reusing the pre-trained model parameters (<xref ref-type="bibr" rid="B32">32</xref>).</p>
</sec>
<sec id="s3_4_3">
<label>3.4.3</label>
<title>Achievements and development of convolutional neural networks</title>
<p>In the medical field, CNN reduces the number of parameters by optimizing connections, improving computational efficiency. It can rapidly adapt to new diagnostic tasks based on existing pre-trained models through transfer learning techniques. Improving the interpretability of CNN is a research priority to enhance the trust of medical professionals in the models and to promote their application in key areas such as clinical diagnosis.</p>
<p>Numerous new CNN-based models demonstrate enhanced capabilities in feature extraction, classification, identification, and the detection of cancer in breast ultrasound images. Cao et&#xa0;al. conducted an analysis comparing the effectiveness of AlexNet, ZFNet, VGG16, GoogleNet, ResNet, and DenseNet models in the screening and classification of breast lesions. They found that the models have good diagnostic performance and DenseNet attained the highest accuracy of 0.85 for lesion classification and a predefined region of interest (ROI) of 0.88 (<xref ref-type="bibr" rid="B59">59</xref>). The classical LeNet architecture, which addresses the issue of missing linear units and enhances the discriminative ability of extracted features, achieved an accuracy of 0.90 for recognizing benign and malignant classification of breast ultrasound images (<xref ref-type="bibr" rid="B60">60</xref>). Alaa et&#xa0;al. also found that the optimized CNN model exhibited higher accuracy (<xref ref-type="bibr" rid="B61">61</xref>). The development of CNN video models has overcome the limitations of static ultrasound images. 3D ResNet-50 and KamNet can identify more detailed spatial and temporal information to accurately classify breast lesions, thereby improving the clinical diagnosis of BC (<xref ref-type="bibr" rid="B62">62</xref>, <xref ref-type="bibr" rid="B63">63</xref>). Jarosik P. et&#xa0;al. investigated the potential for creating a DL breast lesion classification model using raw RF ultrasound data and proposed a CNN model-based breast lesion classification method, which is capable of automatically handling RF ultrasound signals and is trained on both the RF signals and their envelope samples. The classification performance substantially surpasses that of various other parametric classifiers (AUC: 0.77 vs. 0.64). This study expands the application of AI in breast ultrasound by uniting the model with new ultrasound techniques.</p>
</sec>
<sec id="s3_4_4">
<label>3.4.4</label>
<title>Limitations of convolutional neural networks</title>
<p>Nevertheless, CNN faces challenges in medical applications. First, the computational complexity of CNN is high, requiring a lot of computational resources and time. How to optimize the structure and algorithm of CNN to improve computational efficiency is an important direction for future research. Furthermore, malicious attacks and noise interference can lead to incorrect outputs from CNNs. Enhancing the security and robustness of CNN to ensure its reliability in medical applications is an important research topic that needs to be solved.</p>
</sec>
</sec>
</sec>
<sec id="s4">
<label>4</label>
<title>Applications</title>
<sec id="s4_1">
<label>4.1</label>
<title>Radiomics</title>
<p>Radiomics combines large data analysis technology with medical imaging, which is mainly divided into five steps: medical image acquisition, ROI segmentation, feature extraction, feature selection, and model building and validation. Radiomics can acquire X-ray, US, CT, MRI, and even biopsy slice images. The regions related to the lesion in the image are summarized and segmented for further feature extraction. Next, radiomic features like signal intensity, lesion shape, size, and texture features are extracted from ROI regions. Features with good repeatability, stability, and independence are selected by feature selection methods, such as the least absolute shrinkage and selection operator (LASSO). Finally, model building and testing of independent samples were performed by common statistical methods and advanced ML strategies (<xref ref-type="bibr" rid="B64">64</xref>).</p>
<p>The clinical imperative of radiomics extends beyond feature extraction. Its core promise lies in translating complex quantitative data into objective, actionable biomarkers that can resolve diagnostic ambiguity in equivocal lesions, provide prognostic insights complementary to histopathology, and ultimately enable more personalized and confident patient management decisions.</p>
<p>Radiomics features mainly describe the heterogeneity of the internal structure of the lesions, and statistical features quantify the grayscale distribution and texture patterns of the images. By extracting these features, feature vectors can be constructed for subsequent ML algorithms for classification and diagnosis.</p>
<p>A critical challenge in radiomics is feature reproducibility. Extracted features are highly sensitive to variations in image acquisition parameters, pre-processing steps, and segmentation methods. Without rigorous standardization across centers, the generalizability of radiomics models is fundamentally limited. Many published studies fail to adequately address this issue or to perform validation on fully independent, external datasets, which is essential to assess true clinical applicability.</p>
<p>Consequently, the external validation of radiomics models on completely independent datasets, preferably from institutions using different equipment and protocols, is not merely a best practice but a prerequisite for assessing true clinical applicability. Many published high-performance models fail this critical test, their accuracy metrics reflecting optimistic bias inherent to single-center, retrospective studies. Rigorous external validation serves as the primary means to expose and quantify the impact of domain shift.</p>
<p>The development of radiomics has enabled the close integration of engineering intelligence with imaging medicine, presenting unprecedented opportunities for medical diagnosis and research. In disease diagnosis, radiomics can dig out hidden subtle information, objectively assess inter- and intra-tumor heterogeneity through spatial distribution, and assist physicians in more accurately determining the type, stage, and prognosis of diseases (<xref ref-type="bibr" rid="B65">65</xref>, <xref ref-type="bibr" rid="B66">66</xref>). In treatment planning, radiation therapists determine the irradiation range and dose more accurately based on the detailed information extracted from images, optimize the automatic planning process and dosimetric trade-offs, and improve the therapeutic efficacy while reducing damage to the surrounding healthy tissues (<xref ref-type="bibr" rid="B67">67</xref>, <xref ref-type="bibr" rid="B68">68</xref>). In disease monitoring and efficacy assessment, real-time tracking of disease progression and evaluation of treatment efficacy through image analysis allows for early detection of signs of recurrence or assessment of the effectiveness of treatment, leading to adjustment of treatment strategies.</p>
<p>The applications of radiomics in BC mainly include both lesion classification and treatment outcome prediction. Lesion classification involved differentiating benign and malignant lesions, molecular subtypes, and other clinicopathologic indices, including the status of the anterior sentinel lymph nodes. As a whole, radiomics is currently a popular direction expected to improve the accuracy of the diagnostic classification of breasts, with ultrasound-associated radiomics developing particularly rapidly (<xref ref-type="bibr" rid="B69">69</xref>).</p>
<sec id="s4_1_1">
<label>4.1.1</label>
<title>B-mode ultrasound</title>
<p>Radiomics, when applied to B-mode ultrasound (B-US), enhances the accuracy of breast lesion classification by extracting a multitude of quantitative features from ultrasound images. This is supported by frameworks that utilize elastographic features, quantitative ultrasound parametric images, and computer-aided diagnosis systems to improve the efficiency and accuracy of lesion segmentation and classification.</p>
<p>In patients with dense breast tissue, the ultrasound can differentiate cystic solidity within the lesion and provide important information regarding breast lesion classification. The Breast imaging reporting and data system (BI-RADS) provides a standardized digital presentation of malignancy risk in breast lesions. A clear delineation of the probability of malignancy from zero to greater than ninety-five percent is achieved through the six classifications of breast lesions. The BI-RADS and final assessment greatly reduce the individualized ambiguity in clinicians&#x2019; understanding of the recommendations of the imaging report (<xref ref-type="bibr" rid="B14">14</xref>, <xref ref-type="bibr" rid="B70">70</xref>). For lesions in BI-RADS-3, further short-term follow-up is recommended, whereas for lesions in BI-RADS-4a or higher, immediate biopsy is recommended for diagnosis. The revised BI-RADS may offer the possibility of downgrading certain benign lesions from BI-RADS-4a to BI-RADS-3, contingent upon monitoring and safe follow-up, which could serve as an alternative to biopsy. How to ensure more accurate BI-RADS determinations is, therefore, a key issue affecting the subsequent management of patients (see <xref ref-type="table" rid="T2"><bold>Table&#xa0;2</bold></xref>).</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Interpretation of Breast Imaging Reporting and Data System (BI-RADS) scores.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">BI-RADS</th>
<th valign="middle" align="center">Malignant degree of the lesion</th>
<th valign="middle" align="center">Cancer risk</th>
<th valign="middle" align="center">Patient management</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">1</td>
<td valign="middle" align="left">Negative</td>
<td valign="middle" align="left">About 0%</td>
<td valign="middle" align="left">Routine screening</td>
</tr>
<tr>
<td valign="middle" align="left">2</td>
<td valign="middle" align="left">Benign</td>
<td valign="middle" align="left">About 0%</td>
<td valign="middle" align="left">Routine screening</td>
</tr>
<tr>
<td valign="middle" align="left">3</td>
<td valign="middle" align="left">Probably benign</td>
<td valign="middle" align="left">0% - 2%</td>
<td valign="middle" align="left">6-month short-term interval safety follow-up</td>
</tr>
<tr>
<th valign="middle" colspan="4" align="left">4</th>
</tr>
<tr>
<td valign="middle" align="left">4a</td>
<td valign="middle" align="left">Low suspicion of malignancy</td>
<td valign="middle" align="left">2% - 10%</td>
<td valign="middle" align="left">Biopsy</td>
</tr>
<tr>
<td valign="middle" align="left">4b</td>
<td valign="middle" align="left">Moderate suspicion of malignancy</td>
<td valign="middle" align="left">10% - 50%</td>
<td valign="middle" align="left">Biopsy</td>
</tr>
<tr>
<td valign="middle" align="left">4c</td>
<td valign="middle" align="left">High suspicion of malignancy</td>
<td valign="middle" align="left">50% - 95%</td>
<td valign="middle" align="left">Biopsy</td>
</tr>
<tr>
<td valign="middle" align="left">5</td>
<td valign="middle" align="left">Highly suggestive of malignancy</td>
<td valign="middle" align="left">&gt;95%</td>
<td valign="middle" align="left">Biopsy</td>
</tr>
<tr>
<td valign="middle" align="left">6</td>
<td valign="middle" align="left">Malignancy</td>
<td valign="middle" align="left">100%</td>
<td valign="middle" align="left">Surgical resection is appropriately based on the patient&#x2019;s condition</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s4_1_2">
<label>4.1.2</label>
<title>Elastography</title>
<p>Elastography assesses tissue stiffness by applying external forces to deform the tissue, assessing the elastic modulus of the tissue by analyzing the change in ultrasound signal before and after deformation. This provides clearer and more accurate information about the lesions compared to B-US and color Doppler imaging. Elastography has been extensively studied in breast lesion classification. Currently, elastography imaging techniques mainly include strain elastography (SE) and SWE, which may improve the diagnostic specificity of breast ultrasound.</p>
<p>SE reflects tissue stiffness by applying pressure with a probe, observing ultrasound image changes before and after the deformation of the tissue, and calculating the strain ratio or strain rate. Some studies have reported SEN, SPE, and ACC of 0.89, 0.90, and 0.90, respectively, in the semiquantitative identification of breast lesions with SE (<xref ref-type="bibr" rid="B71">71</xref>). In a meta-analysis of SE and SWE for radiomic-based breast lesion classification, the combined SEN for SE in diagnosing breast lesions was 0.84 (95% CI: 0.82 - 0.87) (<xref ref-type="bibr" rid="B72">72</xref>).</p>
<p>SWE mainly uses acoustic radiation force or mechanical vibration to generate shear waves in tissues and detects the propagation velocity of shear waves by ultrasound imaging. The propagation velocity of shear waves is proportional to the elastic modulus of the tissue, so the stiffness of the tissue can be assessed by measuring the shear wave velocity. A meta-analysis on BC diagnosis involving SE and SWE modalities indicated that the combined sensitivity (SEN) for SWE was 0.85 (95% CI: 0.83 - 0.87), underscoring the enhanced diagnostic accuracy when SWE is used in conjunction with other imaging techniques. Although SWE and SE have similar diagnostic efficacy, SWE, with its demonstrated higher reproducibility and objectivity, offers significant practical advantages in medical applications, as evidenced by its successful implementation in assessing liver conditions, diagnosing prostate lesions, and detecting early renal damage in diabetes.</p>
<p>Despite significant advances in elastography for breast lesion classification, several limitations and challenges persist. Elastography results are susceptible to factors such as probe pressure, tissue depth, and respiratory motion. Different brands and models of ultrasound equipment vary in elastography performance, which may lead to inconsistent results. These factors need to be considered in practical applications.</p>
</sec>
</sec>
<sec id="s4_2">
<label>4.2</label>
<title>Deep learning application</title>
<p>Traditional ML methods rely on manual feature extraction and visual observation of image morphology by ultrasound workers, which requires domain knowledge and makes it difficult to capture complex patterns that may have significant inter-observer variations. The application of DL techniques in image classification, object detection, segmentation, and image synthesis provides a new perspective for the diagnosis of breast lesions. CNNs (e.g., ResNet, DenseNet) automatically extract hierarchical image features through their convolutional layers. Their ability to characterize complex patterns, including subtle findings like architectural distortions and spiculated margins, is significantly superior to traditional methods. However, DL models rely on large-scale labeled data, and the high cost of medical image labeling has led to overfitting in some studies using small samples. The comparison of the advantages and limitations of ML and DL models in breast ultrasound is summarized in <xref ref-type="table" rid="T3"><bold>Table&#xa0;3</bold></xref>. In recent years, with the rapid growth in graphics processor computing power, more and more studies have integrated DL and radiomics (<xref ref-type="bibr" rid="B73">73</xref>&#x2013;<xref ref-type="bibr" rid="B75">75</xref>). Approaches that combine DL and radiomics utilize supervised learning techniques, employing learning methods to preprocess limited image data and derive numerous quantitative features from these images through the machine&#x2019;s autonomous learning process. The subjectivity and uncertainty of radiologists&#x2019; classification choices are reduced to speed up diagnosis and treatment and alleviate the healthcare burden.</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Comparison of traditional machine learning and deep learning in breast ultrasound.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="center">Model</th>
<th valign="top" align="center">Core features</th>
<th valign="top" align="center">Feature extraction method</th>
<th valign="top" align="center">Data requirements</th>
<th valign="top" align="center">Interpretability</th>
<th valign="top" align="center">Advantages</th>
<th valign="top" align="center">Limitations</th>
</tr>
</thead>
<tbody>
<tr>
<th valign="top" colspan="7" align="left">ML</th>
</tr>
<tr>
<td valign="top" align="left">&#x2003;SVM</td>
<td valign="top" align="left">Hyperplane-based classification that relies on manually extracted morphological and textural features</td>
<td valign="top" align="left">Manual extraction of morphological features (edge regularity, aspect ratio), texture features (gray scale covariance matrix GLCM)</td>
<td valign="top" align="left">Small samples (&lt;500 cases)</td>
<td valign="top" align="left">High (based on feature weights)</td>
<td valign="top" align="left">Fast training for small samples</td>
<td valign="top" align="left">Reliance on expert characterization makes it difficult to capture complex patterns</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;RF</td>
<td valign="top" align="left">Integrated learning approach with voting decisions via multiple decision trees relying on radiomics features</td>
<td valign="top" align="left">Radiomics characterization (shape, first-order statistics, texture)</td>
<td valign="top" align="left">Medium sample (500-1000)</td>
<td valign="top" align="left">Medium (significance of characteristics is open to interpretation)</td>
<td valign="top" align="left">Robust to high-dimensional data, can be trained in parallel</td>
<td valign="top" align="left">Characterization works are time-consuming and noise-sensitive</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;KNN</td>
<td valign="top" align="left">Sample similarity-based classification, which relies on geometric and echo features, is an inert learning (no explicit training required)</td>
<td valign="top" align="left">Geometric features (size, circumference), echo characteristics</td>
<td valign="top" align="left">Small samples</td>
<td valign="top" align="left">High (based on nearest neighbor samples)</td>
<td valign="top" align="left">Simple algorithm, no training required</td>
<td valign="top" align="left">High computational complexity and sensitive to dimensionality</td>
</tr>
<tr>
<th valign="top" colspan="7" align="left">DL</th>
</tr>
<tr>
<td valign="top" align="left">&#x2003;CNN</td>
<td valign="top" align="left">Contains a convolutional layer, a pooling layer, and automatically learns hierarchical features from images, and is the underlying architecture for most DL models</td>
<td valign="top" align="left">Automatically learn multi-level features (from low-level edges to high-level semantics) from raw images</td>
<td valign="top" align="left">Large samples (&gt;1000 cases)</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">End-to-end learning without manual features</td>
<td valign="top" align="left">Requires large amounts of labeled data and high training costs</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;ResNet</td>
<td valign="top" align="left">The residual connection is introduced on the basis of CNN to solve the problem of deep network training, which belongs to the improved variant of CNN</td>
<td valign="top" align="left">Introducing residual connectivity to solve the deep network gradient vanishing problem</td>
<td valign="top" align="left">Medium to large-scale data</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">Trainable Extremely Deep Networks</td>
<td valign="top" align="left">High model complexity and difficult to interpret</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;DenseNet</td>
<td valign="top" align="left">Dense connectivity between layers to enhance feature propagation belongs to the optimized architecture of CNNs and is commonly used for image classification</td>
<td valign="top" align="left">Dense connectivity improves feature propagation efficiency</td>
<td valign="top" align="left">Large-scale data</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">High parameter efficiency and feature reusability</td>
<td valign="top" align="left">Longer training time</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;3D CNN</td>
<td valign="top" align="left">Extended to three-dimensional space based on CNN, it is suitable for spatio-temporal feature extraction of 3D ultrasound data</td>
<td valign="top" align="left">Simultaneous extraction of spatial and temporal dimension features</td>
<td valign="top" align="left">Multicenter data</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">For 3D ultrasound data, capturing spatial structure</td>
<td valign="top" align="left">Extremely high data requirements and high consumption of computing resources</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;RNN</td>
<td valign="top" align="left">Contains cyclic connections for processing sequence data (e.g., ultrasound video timing information) and belongs to the sequence modeling DL model</td>
<td valign="top" align="left">Processing sequence data (e.g., dynamic ultrasound video)</td>
<td valign="top" align="left">Medium-scale data</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">Capturing timing information</td>
<td valign="top" align="left">Training is unstable, and long sequences are difficult to process</td>
</tr>
<tr>
<td valign="top" align="left">&#x2003;GAN</td>
<td valign="top" align="left">It consists of a generator and a discriminator for image generation and enhancement, and belongs to the DL model of unsupervised learning</td>
<td valign="top" align="left">Image enhancement and synthesis (e.g., ultrasound image denoising)</td>
<td valign="top" align="left">Medium-scale series data</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">Generate high-quality synthetic data</td>
<td valign="top" align="left">Training instability, risk of mode crash</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>ML, Machine Learning; DL, Deep Learning; CNN, Convolutional Neural Network; RF, Random Forest; SVM, Support Vector Machine; ResNet, Residual Network; KNN, K-Nearest Neighbors; RNN, Recurrent neural network; GAN, Generative adversarial networks.</p></fn>
</table-wrap-foot>
</table-wrap>
<p>DL techniques are employed for fully automated breast density segmentation and classification, with results strongly correlating with radiologists&#x2019; manual classification. Zhang et&#xa0;al. aimed to investigate whether DL-based radiomics models can improve the diagnostic performance of ultrasound for breast lesion classification. They developed DL radiomics models based on B-US and SWE in terms of breast lesion classification and compared them with quantitative SWE parameters and diagnostic assessments by radiologists. The study found that the area under the ROC curve for both the B-US and SWE-based DL radiomics models in the training cohort was 0.99 (95% CI: 0.99 - 1.00). Both DL radiomics models were more specific than the maximum elasticity parameter in both the training and independent validation cohorts, and the DL radiomics model significantly outperformed the quantitative SWE parameter and the BI-RADS assessment for breast lesion classification. Huang et&#xa0;al. found that the CNN-based tumor identification and grading system had an accuracy of 0.99 for BI-RADS-3, 0.94 for BI-RADS-4a, 0.73 for BI-RADS-4b, and 0.92 for BI-RADS-4c (<xref ref-type="bibr" rid="B76">76</xref>). Ciritsis et&#xa0;al. similarly classified benign and malignant breast ultrasound images by BI-RADS and CNN modeling. CNN modeling accuracy for BI-RADS was slightly higher than that of radiologists (ACC: 0.93 vs 0.92) (<xref ref-type="bibr" rid="B77">77</xref>). These studies suggest that the integration of DL-based radiomics methods can mimic the human decision-making process and help improve ultrasound&#x2019;s ability to classify the benign and malignant nature of breast lesions.</p>
<p>However, these impressive accuracy metrics require cautious interpretation. The aforementioned studies are predominantly retrospective and single-center in design, which may introduce selection bias and limit generalizability. Performance achieved in optimized research environments may degrade significantly when models encounter data from different institutions, ultrasound machines, or patient populations&#x2014;a key test of real-world utility that many models have not yet passed. Therefore, while DL models show great potential, their clinical maturity and readiness for deployment should be viewed as preliminary, pending validation through robust, prospective, multi-center studies.</p>
<p>While these reported accuracies are promising, they require careful interpretation. The performance is typically achieved in optimized, retrospective research environments using single-center data. A major and frequently under-reported limitation is the potential for significant performance degradation when models encounter data from different institutions, ultrasound machines, or patient populations: a key test of real-world utility that many models have not yet passed.</p>
<p>Consequently, for AI to earn a definitive role in clinical pathways, validation must evolve. Future studies should prioritize prospective, multi-center trials that report not only standard accuracy metrics but also clinically meaningful endpoints. These include the reduction in unnecessary biopsies or short-term follow-ups, changes in radiologists&#x2019; diagnostic confidence and inter-reader agreement when using AI assistance, and the system&#x2019;s impact on overall workflow efficiency.</p>
<p>DL models are also more sensitive for identifying malignancy in non-mass breast lesions that do not strictly meet the BI-RADS definition. Non-mass breast lesions that lack distinctive margins, shape, and typical ultrasound features are more difficult to diagnose. These lesions have higher malignancy rates and poorer prognosis and quality of life for patients. Li et&#xa0;al. demonstrated the MobileNet model for the benign-malignant differentiation of non-mass breast lesions achieved an AUC of 0.84 (95% CI: 0.81 - 0.86) in the test set (<xref ref-type="bibr" rid="B78">78</xref>). Another study looked for imaging features associated with non-mass malignant breast lesions in the development dataset by multivariate LR and found that the development dataset in the ultrasound classification system had a higher AUC compared to when it was not applied (0.91 vs 0.95, <italic>P</italic> &lt; 0.05) (<xref ref-type="bibr" rid="B79">79</xref>). The development of DL model imaging histology may improve the accuracy of early diagnosis of non-mass breast lesions, ultimately improving patient prognosis.</p>
<p>Furthermore, integrating clinical data and pathological outcomes with pre-treatment ultrasound images can enhance the accuracy of breast lesion classification. This is achieved by leveraging radiomics, which employs deep learning models for classification, in conjunction with pertinent clinical information.</p>
</sec>
<sec id="s4_3">
<label>4.3</label>
<title>Explainable AI and challenges in clinical decision-making</title>
<p>Currently, AI models based on CNNs such as VGG, ResNet, and DenseNet have been applied to some medical image classification fields, improving the screening accuracy of lung nodules, breast cancer, and other diseases. However, the ethical and visualization problems associated with the &#x201c;black box&#x201d; nature of complex models such as deep learning remain to be solved. AI models also face significant generalization challenges in cross-institutional applications. Multicenter studies have shown that device differences, inconsistent annotations, and non-standardized acquisition can all lead to reduced diagnostic accuracy of AI models. Synergistic advancement through domain-adaptive algorithms, federated learning frameworks, and clinically standardized workflows is the key to moving AI from the lab to the clinic, where it is needed. XAI technologies such as Gradient-weighted Class Activation Mapping (Grad-CAM) are beginning to be integrated to enhance the transparency of how models are processed. XAI technologies enable clinicians and researchers to track which features in an ultrasound image affect the model&#x2019;s predictions and diagnostic decisions, ensuring that decisions are based on relevant image lesion features rather than artifacts in the data. This improves the trust and clinical applicability of AI models. In addition, XAI can identify whether the model is relying on flaws such as incorrect lesion features, helping engineers to improve their algorithms in a timely manner (<xref ref-type="bibr" rid="B80">80</xref>).</p>
<p>The XAI technology system is mainly divided into intrinsic and extrinsic interpretability (<xref ref-type="bibr" rid="B81">81</xref>). Intrinsic interpretability is based on the structural characteristics of the model, such as the weight coefficients of linear regression, the splitting rule of decision tree, etc., which naturally possesses human-understandable decision logic. Extrinsic interpretability interprets black-box models by means of <italic>post-hoc</italic> analysis, mainly implemented through feature importance analysis, visualization techniques, counterfactual interpretation, etc. SHAP (SHapley Additive exPlanations) is mainly based on the principle of game theory, which quantifies global feature importance by measuring the contribution of each feature to the model prediction. In interpreting breast cancer AI models, SHAP values can identify &#x201c;number of calcified foci&#x201d; as the feature with the greatest influence on malignancy judgments. LIME (Local Interpretable Model-agnostic Explanations) explains local decision logic by generating linear models near the point of prediction. Grad-CAM localizes the region of interest of the CNN model in the image through gradient backpropagation.</p>
<p>The current use of XAI technology has made model predictions more transparent, helping clinicians validate and trust AI-driven diagnoses. However, the development of medical XAI still faces significant challenges. The difficulty in translating heat map model results to clinical imaging terminology still biases physicians&#x2019; trust and use of model results (<xref ref-type="bibr" rid="B82">82</xref>).</p>
</sec>
<sec id="s4_4">
<label>4.4</label>
<title>Integration into clinical workflow and impact on diagnostic pathways</title>
<p>For AI models to realize their potential, diagnostic accuracy is a prerequisite, but their ultimate value will be determined by seamless integration into existing clinical workflows and their tangible impact on patient management decisions. Current evidence supporting such integration and impact remains limited. The implementation of AI in breast ultrasound can be envisioned in several roles: as a concurrent read assistant providing real-time feedback during image acquisition, a second reader for independent verification post-scan, or an automated triage tool flagging high-priority cases. Each mode presents distinct implications for workflow efficiency, radiologist responsibility, and required regulatory approval.</p>
<p>The most significant clinical impact of AI is anticipated in refining the management of diagnostically challenging cases, particularly within the BI-RADS 3 and BI-RADS 4a categories. By supplementing subjective visual assessment with quantitative risk scores, AI has the potential to reduce unnecessary short-term follow-ups for stable, very low-risk BI-RADS 3 lesions, and to decrease benign biopsy rates for BI-RADS 4a lesions by more confidently upgrading a high-risk subset. This optimization of the diagnostic pathway can alleviate patient anxiety, improve healthcare resource allocation, and potentially expedite treatment for aggressive malignancies. A forward-looking step would be the formal exploration of integrating AI-derived quantitative risk scores as a supplementary descriptor within the BI-RADS lexicon, potentially leading to a more nuanced and personalized risk assessment framework.</p>
<p>Successful translation, however, hinges on overcoming significant non-technical barriers that extend far beyond diagnostic accuracy. Clinician trust and acceptance must be cultivated, which depends not only on model interpretability but also on consistent performance within local clinical contexts and clear protocols for reconciling AI suggestions with clinical judgment. Seamless workflow integration requires adapting standardized reporting frameworks to incorporate AI outputs meaningfully, without creating ambiguity or additional burden. Furthermore, navigating the regulatory pathways for AI as a medical device demands a higher level of evidence, typically through prospective trials that demonstrate tangible clinical utility. Perhaps most critically, the widespread adoption of AI necessitates the development of clear medico-legal and ethical frameworks to address accountability, liability, and algorithmic bias, ensuring that these tools are deployed responsibly and equitably. Finally, demonstrating conclusive health economic value through tailored cost-effectiveness analyses is essential for sustainable implementation within diverse healthcare systems. Future development must, therefore, transition from proof-of-concept accuracy studies to pragmatic trials that measure clinically relevant endpoints such as the positive predictive value of AI-guided biopsy recommendations, changes in diagnostic confidence, and time-to-treatment intervals. The in-depth integration of XAI and medical knowledge still has a long way to go, and there is an urgent need to combine text, images, videos, and other forms to improve the intuitiveness of the explanation. 
Secondly, XAI technology has limitations: SHAP values have global interpretation consistency, but require thousands of model inferences and high computational complexity; LIME provides local interpretability, but may introduce bias due to simplified assumptions; Grad-CAM visualization is intuitive, but can only locate spatial regions and cannot quantify feature contribution. This difference in technical characteristics requires a multi-method joint interpretation strategy for clinical applications.</p>
</sec>
</sec>
<sec id="s5">
<label>5</label>
<title>Commercial computer-aided detection and diagnostic systems</title>
<p>Radiologists cannot analyze medical image data accurately and completely for long periods due to fatigue, lack of experience, and other factors (<xref ref-type="bibr" rid="B83">83</xref>, <xref ref-type="bibr" rid="B84">84</xref>). The application of CAD systems in medical imaging has been vigorously explored and developed, with a history dating back to the 1970s. These systems, leveraging advanced technologies such as computer vision, deep learning, and artificial intelligence, have significantly improved diagnostic accuracy and efficiency. They are now widely used in various medical imaging fields, including CT, MRI, and ultrasound, and are crucial for the early detection and treatment of diseases like cancer. In the context of breast ultrasound, these systems are increasingly positioned not merely as detection tools but as integrated decision-support aids designed to augment the radiologist&#x2019;s performance, particularly in characterizing indeterminate lesions and reducing perceptual errors. Commercially available CAD systems are now accessible globally, and the subcommittee on CAD in diagnostic imaging aims to integrate the latest technological advancements and developments in CAD into diagnosis in medical imaging and to develop techniques, practices, and standards to address practical application issues that arise in clinical settings.</p>
<p>CAD can effectively evaluate imaging data and integrate it into statistical algorithm models for analysis, assisting clinical physicians in making accurate diagnoses. CAD systems can provide specific lesion location information and/or diagnostic analysis (<xref ref-type="bibr" rid="B85">85</xref>). The computer-aided detection system mainly reminds radiologists to pay attention to these areas by marking image regions that may display abnormal lesions. The computer-aided diagnostic system, combined with other relevant diagnostic data and biomarkers, provides clinical physicians with an assessment of disease type, severity, staging, progression, or resolution.</p>
<p>Commercially available conventional CAD systems consist of localizing the ROI and classifying the ROI for breast lesion classification. Instantaneous results on the nature of the lesions are provided by analyzing the ROI in the form of frozen images on ultrasound devices, either automatically or manually selected. However, due to the complexity of the breast structure and the presence of noise in the ultrasound images, traditional manual characterization methods usually fail to achieve satisfactory results (<xref ref-type="bibr" rid="B86">86</xref>). These systems assist radiologists in detecting lesions and differentiating between benign and malignant lesions, serving as a &#x201c;second opinion&#x201d; to improve diagnostic accuracy and reduce unnecessary recalls. However, potential radiomic CAD systems warrant further exploration.</p>
<sec id="s5_1">
<label>5.1</label>
<title>S-Detect</title>
<p>S-Detect is regarded as the most crucial image analysis software program for commercial CAD systems for the breast. S-Detect directly pinpoints breast lesions and manually selects ROI to detect lesions on the ultrasound devices even after freezing the image and uploading it to the workstation. The addition of S-Detect clearly distinguishes the features with high malignant representation by outlining the ROI independently and improving the diagnostic efficiency.</p>
<p>S-Detect improves the use of breast ultrasound in clinical practice and assists radiologists in making correct diagnostic decisions. Kiwook K. et&#xa0;al. conducted the first performance study of an AI-driven tool involving 192 breast lesions. They found that S-Detect classified diagnoses with a significantly higher AUC than radiologists (0.73 vs. 0.65, <italic>P</italic> = 0.04). This aligns with broader research indicating AI&#x2019;s potential to enhance diagnostic accuracy in breast cancer. A study determining the effect of S-Detect on the diagnostic ability of radiologists of different experiences found that the diagnostic accuracy of inexperienced radiologists improved significantly with CAD assistance, as well as the SPE and PPV of experienced radiologists. Diagnostic results between radiologists and S-Detect showed moderate concordance (Kappa = 0.58) (<xref ref-type="bibr" rid="B87">87</xref>, <xref ref-type="bibr" rid="B88">88</xref>). Wang et&#xa0;al. demonstrated significant improvement in AUC for radiologists of different experience when using S-Detect for categorical diagnosis (<xref ref-type="bibr" rid="B89">89</xref>). S-Detect shows potential to augment diagnostic accuracy, particularly for less-experienced readers. However, its utility is not uniform; performance is highly dependent on the chosen BI-RADS threshold and remains subject to variability from manual ROI selection. Most importantly, its effectiveness and consistency across diverse clinical settings lack robust validation through large-scale, multi-center prospective studies, which is necessary before broad clinical recommendations can be made.</p>
<p>S-Detect also improves inter- and intra-observer concordance (<xref ref-type="bibr" rid="B90">90</xref>). The final assessment of inter-observer variability by category for ultrasound was significantly improved (<italic>P</italic> &lt; 0.001) in Park&#x2019;s study (<xref ref-type="bibr" rid="B87">87</xref>).</p>
<p>Different definitions of benign and malignant for BI-RADS may lead to different diagnostic efficacy of S-Detect. The diagnostic efficacy of S-Detect was significantly higher than that of radiologists when the threshold of benignity and malignancy of the lesion was set to the BI-RADS-4a (ACC: 0.71 vs 0.56, <italic>P</italic> &lt; 0.05). However, the diagnostic efficacy of S-Detect was significantly lower than that of radiologists (ACC: 0.71 vs 0.87, <italic>P</italic> &lt; 0.05) when the threshold value was set to BI-RADS-4b (<xref ref-type="bibr" rid="B89">89</xref>). At the same time, S-Detect also leads to changes in diagnostic BI-RADS classification (<xref ref-type="bibr" rid="B91">91</xref>, <xref ref-type="bibr" rid="B92">92</xref>). However, the diagnostic performance of S-Detect demonstrates significant dependency on BI-RADS threshold definitions. This performance discrepancy underscores potential limitations in risk stratification across BI-RADS categories. Additionally, the requirement for manual ROI selection introduces operator-dependent variability, as inter-operator differences in lesion contouring may influence feature extraction and classification outcomes. Multicenter validation studies are currently lacking to establish the generalizability of diagnostic consistency across diverse clinical settings. The analysis of the benignness or malignancy of BI-RADS lesions by S-Detect remains questionable, and S-Detect does not completely liberate radiologists from ultrasound diagnosis. The stability of the diagnostic efficacy of the S-Detect software for assisted classification of breast lesions under different definitions and its adaptability to the BI-RADS should be continuously explored in clinical practice.</p>
</sec>
<sec id="s5_2">
<label>5.2</label>
<title>Other systems</title>
<p>In addition to S-Detect software, CAD systems such as MobileNet, KOIOS, and others have shown good value in breast lesion classification.</p>
<p>MobileNet can accurately classify breast lesions, especially BI-RADS-4 breast lesions (<xref ref-type="bibr" rid="B93">93</xref>). A screening study involving 479 BI-RADS-4a breast lesions found that the MobileNet model performed best in classification (AUC: 0.90, ACC: 0.91). It was predicted that 14.4% of BI-RADS-4a patients would be upgraded to BI-RADS-4b as a result of MobileNet modeling (<xref ref-type="bibr" rid="B94">94</xref>). The good performance of MobileNet for benign and malignant differentiation of non-mass breast lesions has also been described previously (<xref ref-type="bibr" rid="B78">78</xref>). Compared with the base MobileNet model, the novel MobileNet-v2 model, refined using super-resolution ultrasound images, demonstrated better diagnostic performance in breast lesion classification, with its AUC improved by 0.09 and 0.03 in the training and test sets, respectively (<xref ref-type="bibr" rid="B95">95</xref>). The MobileNet model can reduce the incidence of malignant tumors in breast lesions diagnosed as BI-RADS-4a on preoperative ultrasound examination, enabling clinicians to pay closer attention to such patients, provide timely and appropriate management, and avoid delays. Although MobileNet achieves a high AUC for BI-RADS classification, it struggles with non-mass lesions. Transfer learning-based models like MobileNet-v2 require large-scale, standardized datasets to avoid overfitting.</p>
<p>KOIOS DS TM (KOIOS) has demonstrated excellent performance as a new CAD system in providing risk assessment for breast malignancy. KOIOS redefines breast lesion classification based primarily on the BI-RADS (see <xref ref-type="table" rid="T4"><bold>Table&#xa0;4</bold></xref>). KOIOS, like S-Detect, improves the correct assessment of ultrasound breast lesions for most physicians (<xref ref-type="bibr" rid="B95">95</xref>). Its decision support for the assessment of breast ultrasound lesions may reduce unnecessary biopsies. KOIOS has a significant impact on the assessment of lesions in the breast and on decisions about whether to biopsy a lesion (<xref ref-type="bibr" rid="B96">96</xref>). The SEN of KOIOS for performing breast lesion classification and deciding whether to biopsy a lesion was 0.87 (95% CI: 0.84 - 0.90), whereas the mean SEN for radiologists&#x2019; judgments from ultrasound images only was 0.83 (95% CI: 0.78 - 0.89) (<xref ref-type="bibr" rid="B97">97</xref>). Patients with KOIOS-recommended biopsies had a higher rate of positivity compared with BI-RADS-recommended biopsies (<xref ref-type="bibr" rid="B98">98</xref>). At the same time, KOIOS may also improve inter- and intra-observer agreements. However, it has been criticized for underperforming in patients with dense breast tissue (<xref ref-type="bibr" rid="B99">99</xref>). Its reliance on multimodal radiomics may introduce bias when clinical data is incomplete (<xref ref-type="bibr" rid="B100">100</xref>).</p>
<table-wrap id="T4" position="float">
<label>Table&#xa0;4</label>
<caption>
<p>KOIOS system rating interpretation.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="center">KOIOS</th>
<th valign="top" align="center">Cancer risk</th>
<th valign="top" align="center">BI-RADS</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Benign (Kbe)</td>
<td valign="top" align="left">0% - 0.5%</td>
<td valign="top" align="left">BI-RADS 1<break/>BI-RADS 2</td>
</tr>
<tr>
<td valign="top" align="left">Possible benign (Kpb)</td>
<td valign="top" align="left">0.5% - 2%</td>
<td valign="top" align="left">BI-RADS 3</td>
</tr>
<tr>
<td valign="top" align="left">Suspicious (KSS)</td>
<td valign="top" align="left">2% - 50%</td>
<td valign="top" align="left">BI-RADS-4a<break/>BI-RADS-4b</td>
</tr>
<tr>
<td valign="top" align="left">Possible malignant (KPM)</td>
<td valign="top" align="left">50% - 95%</td>
<td valign="top" align="left">BI-RADS-4c<break/>BI-RADS 5</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Despite the vigorous development and achievements of S-Detect and KOIOS software, CAD systems for BC screening continue to face numerous challenges, notably the absence of a global public dataset, complexities in binary classification, inadequate image quality, and a heavy reliance on manual ROI annotation. There is still a long way to go in the development of commercial CAD systems.</p>
</sec>
</sec>
<sec id="s6">
<label>6</label>
<title>Evaluating translational readiness: bridging the gap between research and clinic</title>
<p>The transition of AI from research prototypes to reliable clinical tools necessitates a critical appraisal of their validation rigor and real-world robustness. <xref ref-type="table" rid="T5"><bold>Table&#xa0;5</bold></xref> provides a systematic comparison of key characteristics between typical single-center research prototypes and commercially available multi-center validated systems, highlighting the gaps that must be addressed for widespread adoption.</p>
<table-wrap id="T5" position="float">
<label>Table&#xa0;5</label>
<caption>
<p>Systematic comparison between single-center research prototypes and commercially deployed AI systems in breast ultrasound.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="center">Aspect</th>
<th valign="top" align="center">Research prototype</th>
<th valign="top" align="center">Commercial system</th>
<th valign="top" align="center">Core translational gap</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="center">Data &amp; Validation</td>
<td valign="top" align="center">Single-center, retrospective. Internal validation</td>
<td valign="top" align="center">Multi-center, prospective. External validation required</td>
<td valign="top" align="center">Proven on local data<break/>vs.<break/>validated for broad use</td>
</tr>
<tr>
<td valign="top" align="center">Performance Focus</td>
<td valign="top" align="center">Aggregate metrics</td>
<td valign="top" align="center">Clinical endpoint impact</td>
<td valign="top" align="center">Benchmark excellence<break/>vs.<break/>clinical utility</td>
</tr>
<tr>
<td valign="top" align="center">Failure &amp; Robustness</td>
<td valign="top" align="center">Rarely analyzed</td>
<td valign="top" align="center">Failure mode analysis is mandated</td>
<td valign="top" align="center">Unknown reliability<break/>vs.<break/>quantified safety</td>
</tr>
<tr>
<td valign="top" align="center">Development Cycle</td>
<td valign="top" align="center">Fast, novel algorithms</td>
<td valign="top" align="center">Slow, regulated updates</td>
<td valign="top" align="center">Pursuit of novelty<break/>vs.<break/>requirement for stability</td>
</tr>
<tr>
<td valign="top" align="center">Success Metric</td>
<td valign="top" align="center">Algorithmic novelty, benchmark rank</td>
<td valign="top" align="center">Clinical adoption, cost-benefit</td>
<td valign="top" align="center">Academic achievement<break/>vs.<break/>healthcare impact</td>
</tr>
</tbody>
</table>
</table-wrap>
<sec id="s6_1">
<label>6.1</label>
<title>The single-center research paradigm: high performance with limited generalizability</title>
<p>Studies developing novel AI models (e.g., customized CNNs, GANs for enhancement) predominantly follow a single-center, retrospective design. While these works demonstrate impressive accuracy (often AUC &gt;0.90) and drive methodological innovation, their results are intrinsically tied to the local data domain. Performance claims are vulnerable to domain shift and may not hold across different ultrasound machines, patient demographics, or institutional practices. The primary value of this paradigm lies in proof-of-concept and algorithm advancement, not in proving clinical readiness.</p>
</sec>
<sec id="s6_2">
<label>6.2</label>
<title>The commercial CAD pathway: emphasis on standardization and multi-center assessment</title>
<p>Commercial systems like S-Detect and KOIOS DS represent a later stage in the translational pipeline. Their development increasingly incorporates data from multiple sites, albeit often under controlled conditions. More importantly, their evaluation increasingly includes multi-center validation studies (e.g., references 87, 97). While reported diagnostic metrics may sometimes appear more modest than research prototypes, they often reflect performance across a broader range of clinical scenarios. The focus shifts towards workflow integration, user interface design, regulatory clearance, and demonstrating consistent utility across diverse settings&#x2014;key factors for clinical adoption that are frequently absent in research papers.</p>
</sec>
<sec id="s6_3">
<label>6.3</label>
<title>The critical role of external validation and handling heterogeneity</title>
<p>The most significant distinction between a research prototype and a clinically viable tool is the evidence from prospective, externally validated trials. Future research must prioritize this step. Furthermore, explicit strategies to mitigate domain shift are needed. These include: (1) Developing and reporting model performance stratified by ultrasound device vendor or model; (2) Utilizing domain adaptation techniques in model training to improve robustness to acquisition differences; (3) Federated learning frameworks that enable training on distributed data without centralizing sensitive patient information, thereby inherently encompassing multi-center heterogeneity.</p>
</sec>
</sec>
<sec id="s7" sec-type="conclusions">
<label>7</label>
<title>Conclusions and future perspectives</title>
<p>This narrative review comprehensively examines recent advancements in AI for breast ultrasound, with a critical focus on its evolving role in lesion classification and the significant translational gaps that must be addressed. Selected studies under controlled, often single-center retrospective conditions have demonstrated that AI models can achieve high classification accuracy for specific tasks. A roadmap diagram showing evolution from traditional CAD to DL enhanced systems has been clearly presented in <xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3</bold></xref>. However, it is imperative to contextualize these figures. They predominantly originate from single-center, retrospective evaluations often conducted on curated datasets. The translational leap to reliable, generalizable performance in heterogeneous clinical practice&#x2014;where image quality, patient demographics, and equipment vary widely&#x2014;remains largely unproven and constitutes the foremost current challenge. Within optimized research settings, the advancement of AI technology has demonstrated the potential to significantly improve the precision and efficiency of diagnosing breast diseases, as evidenced by studies showing AI&#x2019;s ability to enhance the accuracy of BC detection and reduce the workload of radiologists (see <xref ref-type="table" rid="T6"><bold>Table&#xa0;6</bold></xref>). However, the reproducibility of these benefits across diverse clinical environments awaits confirmation through more rigorous validation. Furthermore, we underscore the paramount importance of radiomics in breast lesion classification and clinical practice, to provide radiologists and clinicians with theoretical support for diagnosis and treatment. The integration of radiomics into the clinical practice of breast lesion staging is expected to enhance the efficiency of early screening and facilitate timely diagnosis and treatment of breast cancer. 
The integration of explainable AI and strategies to improve cross-institutional generalization will be pivotal for translating AI from research to clinical practice. However, realizing this potential requires more than technical excellence; it demands concerted efforts in addressing data heterogeneity across institutions, establishing robust regulatory pathways for AI as a medical device, and fostering interdisciplinary collaboration among engineers, clinicians, and healthcare administrators.</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>Evolution from traditional CAD to DL-enhanced systems.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-16-1759194-g003.tif">
<alt-text content-type="machine-generated">Infographic shaped like a fishbone illustrates the evolution of computer-aided diagnosis (CAD) from the 1970s to the future of artificial general intelligence. Key milestones include traditional CAD using AutoCAD for calcification detection, machine learning CAD with SVM and RF for lesion classification, deep learning (DL) enhanced CAD with ResNet and DenseNet for intelligent diagnosis, and current multimodal intelligence using GenAI and Explainable AI for data standardization. The bottom section summarizes trends in feature processing, application scenarios, and increasing data dependency over time.</alt-text>
</graphic></fig>
<table-wrap id="T6" position="float">
<label>Table&#xa0;6</label>
<caption>
<p>AI Models for breast lesion classification in ultrasound.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Study</th>
<th valign="middle" align="center">Year</th>
<th valign="middle" align="center">AI model</th>
<th valign="middle" align="center">Dataset</th>
<th valign="middle" align="center">Sample size</th>
<th valign="middle" align="center">Objective</th>
<th valign="middle" align="center">Performance metrics</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Wan KW et&#xa0;al. (<xref ref-type="bibr" rid="B33">33</xref>)</td>
<td valign="middle" align="left">2021</td>
<td valign="middle" align="left">AutoML Vision</td>
<td valign="middle" align="left">Cairo University Breast Ultrasound Images dataset<break/>Mendeley Data BUS dataset</td>
<td valign="middle" align="left">895 images</td>
<td valign="middle" align="left">Breast Lesions Classification</td>
<td valign="middle" align="left">The best-performing AI model:<break/>Random Forest: ACC: 0.90, SEN: 0.71, SPE: 1.00, F1: 0.83, AUCPR: 0.90<break/>Target model:<break/>AutoML Vision: ACC: 0.86, SEN: 0.84, SPE: 0.88, F1: 0.83, AUCPR: 0.95</td>
</tr>
<tr>
<td valign="middle" align="left">Becker AS et&#xa0;al. (<xref ref-type="bibr" rid="B50">50</xref>)</td>
<td valign="middle" align="left">2018</td>
<td valign="middle" align="left">ResNet-50</td>
<td valign="middle" align="left">Private Hospital Dataset</td>
<td valign="middle" align="left">632 cases</td>
<td valign="middle" align="left">Breast Lesions Classification</td>
<td valign="middle" align="left">AUC: 0.84, SEN: 0.84, SPE: 0.80</td>
</tr>
<tr>
<td valign="middle" align="left">Qian X et&#xa0;al. (<xref ref-type="bibr" rid="B51">51</xref>)</td>
<td valign="middle" align="left">2020</td>
<td valign="middle" align="left">A CNN model combining B-mode and color Doppler</td>
<td valign="middle" align="left">Private Hospital Dataset</td>
<td valign="middle" align="left">106565 images</td>
<td valign="middle" align="left">Breast Lesions Classification</td>
<td valign="middle" align="left">AUC: 0.98</td>
</tr>
<tr>
<td valign="middle" align="left">Qian L et&#xa0;al. (<xref ref-type="bibr" rid="B54">54</xref>)</td>
<td valign="middle" align="left">2021</td>
<td valign="middle" align="left">ResNet and VGGNet</td>
<td valign="middle" align="left">Private Hospital Dataset</td>
<td valign="middle" align="left">320 images</td>
<td valign="middle" align="left">Predict postoperative upgrading of pure ductal carcinoma <italic>in situ</italic> diagnosed by core needle biopsy before surgery.</td>
<td valign="middle" align="left">AUC: 0.76 ACC: 0.74 SEN: 0.73 SPE: 0.75</td>
</tr>
<tr>
<td valign="middle" align="left">Cao Z et&#xa0;al. (<xref ref-type="bibr" rid="B59">59</xref>)</td>
<td valign="middle" align="left">2019</td>
<td valign="middle" align="left">AlexNet, ZFNet, VGG, ResNet, GoogLeNet and DenseNet</td>
<td valign="middle" align="left">Private Hospital Dataset</td>
<td valign="middle" align="left">1043 cases</td>
<td valign="middle" align="left">Breast lesion detection and classification</td>
<td valign="middle" align="left">The best-performing AI model:<break/>ResNet: ACC: 0.75</td>
</tr>
<tr>
<td valign="middle" align="left">Balasubramaniam S et&#xa0;al. (<xref ref-type="bibr" rid="B60">60</xref>)</td>
<td valign="middle" align="left">2021</td>
<td valign="middle" align="left">LeNet</td>
<td valign="middle" align="left">Kaggle Dataset</td>
<td valign="middle" align="left">971 images</td>
<td valign="middle" align="left">Breast lesion classification</td>
<td valign="middle" align="left">ACC: 0.90</td>
</tr>
<tr>
<td valign="middle" align="left">AlZoubi A et&#xa0;al. (<xref ref-type="bibr" rid="B61">61</xref>)</td>
<td valign="middle" align="left">2024</td>
<td valign="middle" align="left">BNet, GNet, SqNet, DsNet, RsNet and IncReNet</td>
<td valign="middle" align="left">Private Hospital Dataset</td>
<td valign="middle" align="left">3034 images</td>
<td valign="middle" align="left">Breast lesion classification</td>
<td valign="middle" align="left">The best-performing AI model:<break/>BONet: ACC: 0.83</td>
</tr>
<tr>
<td valign="middle" align="left">Zhao G et&#xa0;al. (<xref ref-type="bibr" rid="B62">62</xref>)</td>
<td valign="middle" align="left">2023</td>
<td valign="middle" align="left">DL-video and DL-image</td>
<td valign="middle" align="left">Private Hospital Dataset</td>
<td valign="middle" align="left">1000 cases</td>
<td valign="middle" align="left">Breast lesion classification</td>
<td valign="middle" align="left">DL-video: AUC: 0.97<break/>DL-image: AUC: 0.93</td>
</tr>
<tr>
<td valign="middle" align="left">Huang Y et&#xa0;al. (<xref ref-type="bibr" rid="B76">76</xref>)</td>
<td valign="middle" align="left">2019</td>
<td valign="middle" align="left">ROI-CNN</td>
<td valign="middle" align="left">Private Hospital Dataset</td>
<td valign="middle" align="left">2238 images</td>
<td valign="middle" align="left">Breast lesion detection and classification</td>
<td valign="middle" align="left">ACC: 0.99</td>
</tr>
<tr>
<td valign="middle" align="left">Ciritsis A et&#xa0;al. (<xref ref-type="bibr" rid="B77">77</xref>)</td>
<td valign="middle" align="left">2019</td>
<td valign="middle" align="left">dCNN</td>
<td valign="middle" align="left">Private Hospital Dataset</td>
<td valign="middle" align="left">582 cases</td>
<td valign="middle" align="left">Breast lesion classification</td>
<td valign="middle" align="left">The classification of BI-RADS 2 versus BI-RADS 3-5, ACC: 0.87<break/>The classification of BI-RADS 2&#x2013;3 versus BI-RADS 4-5, ACC: 0.93<break/>The classification of BI-RADS 2&#x2013;4 versus BI-RADS 5, ACC: 0.84</td>
</tr>
<tr>
<td valign="middle" align="left">Li G et&#xa0;al. (<xref ref-type="bibr" rid="B78">78</xref>)</td>
<td valign="middle" align="left">2023</td>
<td valign="middle" align="left">DenseNet 121448 and MobileNet 448</td>
<td valign="middle" align="left">Private Hospital Dataset</td>
<td valign="middle" align="left">824 cases</td>
<td valign="middle" align="left">Breast lesion detection and classification</td>
<td valign="middle" align="left">The better-performing AI model:<break/>MobileNet 448:<break/>Detection: AUC: 0.99, ACC: 0.97, SEN: 0.97<break/>Classification: AUC: 0.84, ACC: 0.71, SEN: 0.80</td>
</tr>
<tr>
<td valign="middle" align="left">Park KW et&#xa0;al. (<xref ref-type="bibr" rid="B79">79</xref>)</td>
<td valign="middle" align="left">2021</td>
<td valign="middle" align="left">A classification system</td>
<td valign="middle" align="left">Private Hospital Dataset</td>
<td valign="middle" align="left">715 cases</td>
<td valign="middle" align="left">Breast lesion classification</td>
<td valign="middle" align="left">AUC: 0.95</td>
</tr>
<tr>
<td valign="middle" align="left">Park HJ et&#xa0;al. (<xref ref-type="bibr" rid="B87">87</xref>)</td>
<td valign="middle" align="left">2017</td>
<td valign="middle" align="left">S-Detect</td>
<td valign="middle" align="left">Private Hospital Dataset</td>
<td valign="middle" align="left">192 cases</td>
<td valign="middle" align="left">Breast lesion classification</td>
<td valign="middle" align="left">When the cutoff was set at category 4a, S-Detect AUC: 0.73,<break/>Radiologist AUC: 0.65</td>
</tr>
<tr>
<td valign="middle" align="left">Zhao Z et&#xa0;al. (<xref ref-type="bibr" rid="B94">94</xref>)</td>
<td valign="middle" align="left">2022</td>
<td valign="middle" align="left">MobileNet, DenseNet 121, Xception and Inception V3</td>
<td valign="middle" align="left">Private Hospital Dataset</td>
<td valign="middle" align="left">479 cases</td>
<td valign="middle" align="left">Breast lesion classification</td>
<td valign="middle" align="left">The best-performing AI model:<break/>MobileNet: AUC: 0.90, ACC: 0.91, SEN: 0.93</td>
</tr>
<tr>
<td valign="middle" align="left">Yang L et&#xa0;al. (<xref ref-type="bibr" rid="B95">95</xref>)</td>
<td valign="middle" align="left">2023</td>
<td valign="middle" align="left">ORResNet 101, ORMobileNet v2, SRResNet 101, SRMobileNet v2, ORLR, ORSVM, SRLR, SRSVM</td>
<td valign="middle" align="left">Private Hospital Dataset</td>
<td valign="middle" align="left">333 cases</td>
<td valign="middle" align="left">Breast lesion classification</td>
<td valign="middle" align="left">The best-performing AI model:<break/>SRMobileNet v2: AUC improvements of 0.089 and 0.031 in the training and testing sets</td>
</tr>
<tr>
<td valign="middle" align="left">Mango VL et&#xa0;al. (<xref ref-type="bibr" rid="B97">97</xref>)</td>
<td valign="middle" align="left">2025</td>
<td valign="middle" align="left">Koios DS</td>
<td valign="middle" align="left">Private Hospital Dataset</td>
<td valign="middle" align="left">222 cases</td>
<td valign="middle" align="left">Breast lesion classification</td>
<td valign="middle" align="left">The SEN of KOIOS in classifying breast masses and deciding whether to perform a biopsy on lesions is 0.87</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The development of AI requires continuous optimization of validation strategies and rigorous validation. Establishing the reliability of AI models requires a hierarchy of evidence, with prospective external validation on fully independent, multi-center cohorts being the gold standard for assessing real-world generalizability. This process must explicitly account for and analyze performance across different ultrasound equipment vendors and acquisition protocols to quantify and address domain shift. Furthermore, systematic comparisons between research-grade prototypes and commercially deployed systems reveal that readiness for clinical application is determined less by peak accuracy in constrained settings and more by proven robustness, standardized evaluation, and successful integration into heterogeneous clinical environments.</p>
<p>While <xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3</bold></xref> and <xref ref-type="table" rid="T6"><bold>Table&#xa0;6</bold></xref> illustrate the technological evolution and performance landscape, translating this potential into routine clinical practice requires a focused roadmap addressing the following critical pathways:</p>
<sec id="s7_1">
<label>7.1</label>
<title>Development of standardized, multi-vendor datasets and benchmarks</title>
<p>The creation of large-scale, publicly available datasets, acquired from diverse ultrasound systems and adhering to standardized imaging protocols, is a fundamental prerequisite. These should serve not only for training but as independent benchmark platforms to rigorously evaluate model generalizability and resilience to domain shift&#x2014;a key limitation highlighted in current research.</p>
</sec>
<sec id="s7_2">
<label>7.2</label>
<title>Conducting regulatory-grade clinical trials</title>
<p>Future validation must transition from retrospective accuracy studies to prospective, multi-center trials designed with regulatory endpoints for Software as a Medical Device. These trials should demonstrate improvement in tangible clinical outcomes, such as increased positive predictive value of biopsies or reduced time to definitive diagnosis, providing the evidence base required for FDA, CE, or NMPA clearance.</p>
</sec>
<sec id="s7_3">
<label>7.3</label>
<title>Formal integration into clinical reporting and decision pathways</title>
<p>For seamless adoption, AI&#x2019;s role must be defined within the radiological workflow. Its output requires structured integration into the BI-RADS reporting framework. This could involve providing a quantitative malignancy risk score alongside the BI-RADS category or offering decision-support prompts to aid in the management of equivocal lesions, thereby directly influencing patient management algorithms.</p>
<p>Beyond technical and clinical validation, the responsible integration of AI into healthcare demands proactive engagement with its broader regulatory, ethical, and legal ecosystem. Future development must align with evolving standards for Software as a Medical Device, requiring early dialogue with regulatory bodies and study designs that meet approval requirements. Concurrently, the field must establish ethical guidelines and governance structures to ensure fairness, transparency, and accountability in algorithm development and deployment. Clear medico-legal protocols are also needed to define the standard of care and shared responsibility in AI-assisted diagnosis, addressing critical questions of liability and data privacy. Navigating these complex dimensions is not ancillary but fundamental to building the trust required for AI to become a viable and sustained component of clinical practice.</p>
</sec>
<sec id="s7_4">
<label>7.4</label>
<title>Implementation science and health economics research</title>
<p>Beyond technical and clinical validation, successful deployment necessitates implementation science studies to identify barriers to adoption and robust health economic analyses to demonstrate cost-effectiveness within specific healthcare systems. This evidence is crucial for stakeholder buy-in and sustainable integration.</p>
<p>In conclusion, the journey from promising research to routine practice hinges on concerted efforts across multiple interconnected domains: the creation of standardized, multi-vendor benchmarks; the generation of prospective, regulatory-grade clinical evidence; the development of frameworks for structured clinical integration; the proactive navigation of regulatory, ethical, and legal landscapes; and the demonstration of practical implementation value through health economics research. By prioritizing these comprehensive translational imperatives, the field can ensure that AI evolves into a reliable, trusted, and equitably deployed standard of care in breast ultrasound.</p>
</sec>
</sec>
</body>
<back>
<sec id="s8" sec-type="author-contributions">
<title>Author contributions</title>
<p>TM: Data curation, Writing &#x2013; original draft, Validation, Writing &#x2013; review &amp; editing. ZW: Writing &#x2013; original draft, Methodology, Conceptualization, Validation. JD: Formal analysis, Writing &#x2013; original draft, Investigation. YC: Investigation, Writing &#x2013; review &amp; editing, Validation. HZ: Visualization, Conceptualization, Writing &#x2013; review &amp; editing. XC: Supervision, Writing &#x2013; review &amp; editing, Project administration.</p></sec>
<sec id="s10" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
<p>The author XC declared that they were an editorial board member of Frontiers, at the time of submission. This had no impact on the peer review process and the final decision.</p></sec>
<sec id="s11" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec id="s12" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Irmici</surname> <given-names>G</given-names></name>
<name><surname>C&#xe8;</surname> <given-names>M</given-names></name>
<name><surname>Pepa</surname> <given-names>GD</given-names></name>
<name><surname>D&#x2019;Ascoli</surname> <given-names>E</given-names></name>
<name><surname>De Berardinis</surname> <given-names>C</given-names></name>
<name><surname>Giambersio</surname> <given-names>E</given-names></name>
<etal/>
</person-group>. 
<article-title>Exploring the potential of artificial intelligence in breast ultrasound</article-title>. <source>Crit Rev Oncog</source>. (<year>2024</year>) <volume>29</volume>:<fpage>15</fpage>&#x2013;<lpage>28</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1615/critrevoncog.2023048873</pub-id>, PMID: <pub-id pub-id-type="pmid">38505878</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<label>2</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Bray</surname> <given-names>F</given-names></name>
<name><surname>Laversanne</surname> <given-names>M</given-names></name>
<name><surname>Sung</surname> <given-names>H</given-names></name>
<name><surname>Ferlay</surname> <given-names>J</given-names></name>
<name><surname>Siegel</surname> <given-names>RL</given-names></name>
<name><surname>Soerjomataram</surname> <given-names>I</given-names></name>
<etal/>
</person-group>. 
<article-title>Global cancer statistics 2022: GLOBOCAN estimates of incidence and mortality worldwide for 36 cancers in 185 countries</article-title>. <source>CA: Cancer J Clin</source>. (<year>2024</year>) <volume>74</volume>:<page-range>229&#x2013;63</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.3322/caac.21834</pub-id>, PMID: <pub-id pub-id-type="pmid">38572751</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<label>3</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Burstein</surname> <given-names>HJ</given-names></name>
<name><surname>Curigliano</surname> <given-names>G</given-names></name>
<name><surname>Th&#xfc;rlimann</surname> <given-names>B</given-names></name>
<name><surname>Weber</surname> <given-names>WP</given-names></name>
<name><surname>Poortmans</surname> <given-names>P</given-names></name>
<name><surname>Regan</surname> <given-names>MM</given-names></name>
<etal/>
</person-group>. 
<article-title>Customizing local and systemic therapies for women with early breast cancer: the St. Gallen International Consensus Guidelines for treatment of early breast cancer 2021</article-title>. <source>Ann Oncol Off J Eur Soc Med Oncol</source>. (<year>2021</year>) <volume>32</volume>:<page-range>1216&#x2013;35</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.annonc.2021.06.023</pub-id>, PMID: <pub-id pub-id-type="pmid">34242744</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<label>4</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Barzaman</surname> <given-names>K</given-names></name>
<name><surname>Karami</surname> <given-names>J</given-names></name>
<name><surname>Zarei</surname> <given-names>Z</given-names></name>
<name><surname>Hosseinzadeh</surname> <given-names>A</given-names></name>
<name><surname>Kazemi</surname> <given-names>MH</given-names></name>
<name><surname>Moradi-Kalbolandi</surname> <given-names>S</given-names></name>
<etal/>
</person-group>. 
<article-title>Breast cancer: Biology, biomarkers, and treatments</article-title>. <source>Int Immunopharmacol</source>. (<year>2020</year>) <volume>84</volume>:<fpage>106535</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.intimp.2020.106535</pub-id>, PMID: <pub-id pub-id-type="pmid">32361569</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<label>5</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Peart</surname> <given-names>O</given-names></name>
</person-group>. 
<article-title>Metastatic breast cancer</article-title>. <source>Radiol Technol</source>. (<year>2017</year>) <volume>88</volume>:<page-range>519M&#x2013;39M</page-range>. PMID: <pub-id pub-id-type="pmid">28500107</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<label>6</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Xu</surname> <given-names>Y</given-names></name>
<name><surname>Gong</surname> <given-names>M</given-names></name>
<name><surname>Wang</surname> <given-names>Y</given-names></name>
<name><surname>Yang</surname> <given-names>Y</given-names></name>
<name><surname>Liu</surname> <given-names>S</given-names></name>
<name><surname>Zeng</surname> <given-names>Q</given-names></name>
</person-group>. 
<article-title>Global trends and forecasts of breast cancer incidence and deaths</article-title>. <source>Sci Data</source>. (<year>2023</year>) <volume>10</volume>:<fpage>334</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41597-023-02253-5</pub-id>, PMID: <pub-id pub-id-type="pmid">37244901</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<label>7</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Thigpen</surname> <given-names>D</given-names></name>
<name><surname>Kappler</surname> <given-names>A</given-names></name>
<name><surname>Brem</surname> <given-names>R</given-names></name>
</person-group>. 
<article-title>The role of ultrasound in screening dense breasts-A review of the literature and practical solutions for implementation</article-title>. <source>Diagnost (Basel Switzerland)</source>. (<year>2018</year>) <volume>8</volume>:<elocation-id>20</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/diagnostics8010020</pub-id>, PMID: <pub-id pub-id-type="pmid">29547532</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<label>8</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Yang</surname> <given-names>L</given-names></name>
<name><surname>Wang</surname> <given-names>S</given-names></name>
<name><surname>Zhang</surname> <given-names>L</given-names></name>
<name><surname>Sheng</surname> <given-names>C</given-names></name>
<name><surname>Song</surname> <given-names>F</given-names></name>
<name><surname>Wang</surname> <given-names>P</given-names></name>
<etal/>
</person-group>. 
<article-title>Performance of ultrasonography screening for breast cancer: a systematic review and meta-analysis</article-title>. <source>BMC Cancer</source>. (<year>2020</year>) <volume>20</volume>:<fpage>499</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s12885-020-06992-1</pub-id>, PMID: <pub-id pub-id-type="pmid">32487106</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<label>9</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Giambersio</surname> <given-names>E</given-names></name>
<name><surname>Depretto</surname> <given-names>C</given-names></name>
<name><surname>Trimboli</surname> <given-names>RM</given-names></name>
<name><surname>Di Leo</surname> <given-names>G</given-names></name>
<name><surname>D&#x2019;Ascoli</surname> <given-names>E</given-names></name>
<name><surname>Della Pepa</surname> <given-names>G</given-names></name>
<etal/>
</person-group>. 
<article-title>Utility of detection of breast calcifications with integrated real-time radiography system (IRRS) during digital breast tomosynthesis (DBT)-guided vacuum assisted biopsy (VAB): initial single-center experience</article-title>. <source>La Radiol Medica</source>. (<year>2023</year>) <volume>128</volume>:<fpage>699</fpage>&#x2013;<lpage>703</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11547-023-01636-3</pub-id>, PMID: <pub-id pub-id-type="pmid">37115391</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<label>10</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Pishdad</surname> <given-names>P</given-names></name>
<name><surname>Moosavi</surname> <given-names>A</given-names></name>
<name><surname>Jalli</surname> <given-names>R</given-names></name>
<name><surname>Zarei</surname> <given-names>F</given-names></name>
<name><surname>Saeedi-Moghadam</surname> <given-names>M</given-names></name>
<name><surname>Zeinali-Rafsanjani</surname> <given-names>B</given-names></name>
</person-group>. 
<article-title>How can additional ultrasonography screening improve the detection of occult breast cancer in women with dense breasts</article-title>? <source>Polish J Radiol</source>. (<year>2020</year>) <volume>85</volume>:<page-range>e353&#x2013;e60</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.5114/pjr.2020.97944</pub-id>, PMID: <pub-id pub-id-type="pmid">32817768</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<label>11</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zanotel</surname> <given-names>M</given-names></name>
<name><surname>Bednarova</surname> <given-names>I</given-names></name>
<name><surname>Londero</surname> <given-names>V</given-names></name>
<name><surname>Linda</surname> <given-names>A</given-names></name>
<name><surname>Lorenzon</surname> <given-names>M</given-names></name>
<name><surname>Girometti</surname> <given-names>R</given-names></name>
<etal/>
</person-group>. 
<article-title>Automated breast ultrasound: basic principles and emerging clinical applications</article-title>. <source>La Radiol Medica</source>. (<year>2018</year>) <volume>123</volume>:<fpage>1</fpage>&#x2013;<lpage>12</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11547-017-0805-z</pub-id>, PMID: <pub-id pub-id-type="pmid">28849324</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<label>12</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Gu</surname> <given-names>J</given-names></name>
<name><surname>Jiang</surname> <given-names>T</given-names></name>
</person-group>. 
<article-title>Ultrasound radiomics in personalized breast management: Current status and future prospects</article-title>. <source>Front Oncol</source>. (<year>2022</year>) <volume>12</volume>:<elocation-id>963612</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fonc.2022.963612</pub-id>, PMID: <pub-id pub-id-type="pmid">36059645</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<label>13</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Romeo</surname> <given-names>V</given-names></name>
<name><surname>Cuocolo</surname> <given-names>R</given-names></name>
<name><surname>Apolito</surname> <given-names>R</given-names></name>
<name><surname>Stanzione</surname> <given-names>A</given-names></name>
<name><surname>Ventimiglia</surname> <given-names>A</given-names></name>
<name><surname>Vitale</surname> <given-names>A</given-names></name>
<etal/>
</person-group>. 
<article-title>Clinical value of radiomics and machine learning in breast ultrasound: a multicenter study for differential diagnosis of benign and malignant lesions</article-title>. <source>Eur Radiol</source>. (<year>2021</year>) <volume>31</volume>:<page-range>9511&#x2013;9</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00330-021-08009-2</pub-id>, PMID: <pub-id pub-id-type="pmid">34018057</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<label>14</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Mahant</surname> <given-names>SS</given-names></name>
<name><surname>Varma</surname> <given-names>AR</given-names></name>
</person-group>. 
<article-title>Artificial intelligence in breast ultrasound: the emerging future of modern medicine</article-title>. <source>Cureus</source>. (<year>2022</year>) <volume>14</volume>:<fpage>e28945</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.7759/cureus.28945</pub-id>, PMID: <pub-id pub-id-type="pmid">36237807</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<label>15</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Afrin</surname> <given-names>H</given-names></name>
<name><surname>Larson</surname> <given-names>NB</given-names></name>
<name><surname>Fatemi</surname> <given-names>M</given-names></name>
<name><surname>Alizad</surname> <given-names>A</given-names></name>
</person-group>. 
<article-title>Deep learning in different ultrasound methods for breast cancer, from diagnosis to prognosis: current trends, challenges, and an analysis</article-title>. <source>Cancers</source>. (<year>2023</year>) <volume>15</volume>:<elocation-id>3139</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/cancers15123139</pub-id>, PMID: <pub-id pub-id-type="pmid">37370748</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<label>16</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Brunetti</surname> <given-names>N</given-names></name>
<name><surname>Calabrese</surname> <given-names>M</given-names></name>
<name><surname>Martinoli</surname> <given-names>C</given-names></name>
<name><surname>Tagliafico</surname> <given-names>AS</given-names></name>
</person-group>. 
<article-title>Artificial intelligence in breast ultrasound: from diagnosis to prognosis-A rapid review</article-title>. <source>Diagnost (Basel Switzerland)</source>. (<year>2022</year>) <volume>13</volume>:<elocation-id>58</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/diagnostics13010058</pub-id>, PMID: <pub-id pub-id-type="pmid">36611350</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<label>17</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Leung</surname> <given-names>JH</given-names></name>
<name><surname>Karmakar</surname> <given-names>R</given-names></name>
<name><surname>Mukundan</surname> <given-names>A</given-names></name>
<name><surname>Thongsit</surname> <given-names>P</given-names></name>
<name><surname>Chen</surname> <given-names>MM</given-names></name>
<name><surname>Chang</surname> <given-names>WY</given-names></name>
<etal/>
</person-group>. 
<article-title>Systematic meta-analysis of computer-aided detection of breast cancer using hyperspectral imaging</article-title>. <source>Bioeng (Basel)</source>. (<year>2024</year>) <volume>11</volume>:<elocation-id>1060</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/bioengineering11111060</pub-id>, PMID: <pub-id pub-id-type="pmid">39593720</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<label>18</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Hsu</surname> <given-names>H</given-names></name>
<name><surname>Lee</surname> <given-names>KH</given-names></name>
<name><surname>Karmakar</surname> <given-names>R</given-names></name>
<name><surname>Mukundan</surname> <given-names>A</given-names></name>
<name><surname>Attar</surname> <given-names>RS</given-names></name>
<name><surname>Liu</surname> <given-names>PH</given-names></name>
<etal/>
</person-group>. 
<article-title>From innovation to application: can emerging imaging techniques transform breast cancer diagnosis</article-title>? <source>Diagnost (Basel)</source>. (<year>2025</year>) <volume>15</volume>:<elocation-id>2718</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/diagnostics15212718</pub-id>, PMID: <pub-id pub-id-type="pmid">41226009</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<label>19</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Karmakar</surname> <given-names>R</given-names></name>
<name><surname>Nagisetti</surname> <given-names>Y</given-names></name>
<name><surname>Mukundan</surname> <given-names>A</given-names></name>
<name><surname>Wang</surname> <given-names>HC</given-names></name>
</person-group>. 
<article-title>Impact of the family and socioeconomic factors as a tool of prevention of breast cancer</article-title>. <source>World J Clin Oncol</source>. (<year>2025</year>) <volume>16</volume>:<elocation-id>106569</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.5306/wjco.v16.i5.106569</pub-id>, PMID: <pub-id pub-id-type="pmid">40503398</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<label>20</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Mintz</surname> <given-names>Y</given-names></name>
<name><surname>Brodie</surname> <given-names>R</given-names></name>
</person-group>. 
<article-title>Introduction to artificial intelligence in medicine</article-title>. <source>Minimally Invasive Ther Allied Technol: MITAT: Off J Soc Minimally Invasive Ther</source>. (<year>2019</year>) <volume>28</volume>:<fpage>73</fpage>&#x2013;<lpage>81</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1080/13645706.2019.1575882</pub-id>, PMID: <pub-id pub-id-type="pmid">30810430</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<label>21</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Gupta</surname> <given-names>R</given-names></name>
<name><surname>Srivastava</surname> <given-names>D</given-names></name>
<name><surname>Sahu</surname> <given-names>M</given-names></name>
<name><surname>Tiwari</surname> <given-names>S</given-names></name>
<name><surname>Ambasta</surname> <given-names>RK</given-names></name>
<name><surname>Kumar</surname> <given-names>P</given-names></name>
</person-group>. 
<article-title>Artificial intelligence to deep learning: machine intelligence approach for drug discovery</article-title>. <source>Mol Divers</source>. (<year>2021</year>) <volume>25</volume>:<page-range>1315&#x2013;60</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11030-021-10217-3</pub-id>, PMID: <pub-id pub-id-type="pmid">33844136</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<label>22</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Yu</surname> <given-names>KH</given-names></name>
<name><surname>Healey</surname> <given-names>E</given-names></name>
<name><surname>Leong</surname> <given-names>TY</given-names></name>
<name><surname>Kohane</surname> <given-names>IS</given-names></name>
<name><surname>Manrai</surname> <given-names>AK</given-names></name>
</person-group>. 
<article-title>Medical artificial intelligence and human values</article-title>. <source>New Engl J Med</source>. (<year>2024</year>) <volume>390</volume>:<page-range>1895&#x2013;904</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1056/nejmra2214183</pub-id>, PMID: <pub-id pub-id-type="pmid">38810186</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<label>23</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Lai</surname> <given-names>YC</given-names></name>
<name><surname>Chen</surname> <given-names>HH</given-names></name>
<name><surname>Hsu</surname> <given-names>JF</given-names></name>
<name><surname>Hong</surname> <given-names>YJ</given-names></name>
<name><surname>Chiu</surname> <given-names>TT</given-names></name>
<name><surname>Chiou</surname> <given-names>HJ</given-names></name>
</person-group>. 
<article-title>Evaluation of physician performance using a concurrent-read artificial intelligence system to support breast ultrasound interpretation</article-title>. <source>Breast (Edinburgh Scotland)</source>. (<year>2022</year>) <volume>65</volume>:<page-range>124&#x2013;35</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.breast.2022.07.009</pub-id>, PMID: <pub-id pub-id-type="pmid">35944352</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<label>24</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Masters</surname> <given-names>K</given-names></name>
<name><surname>Herrmann-Werner</surname> <given-names>A</given-names></name>
<name><surname>Festl-Wietek</surname> <given-names>T</given-names></name>
<name><surname>Taylor</surname> <given-names>D</given-names></name>
</person-group>. 
<article-title>Preparing for artificial general intelligence (AGI) in health professions education: AMEE guide no. 172</article-title>. <source>Med Teacher</source>. (<year>2024</year>) <volume>46</volume>:<page-range>1258&#x2013;71</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1080/0142159x.2024.2387802</pub-id>, PMID: <pub-id pub-id-type="pmid">39115700</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<label>25</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Teo</surname> <given-names>ZL</given-names></name>
<name><surname>Quek</surname> <given-names>CWN</given-names></name>
<name><surname>Wong</surname> <given-names>JLY</given-names></name>
<name><surname>Ting</surname> <given-names>DSW</given-names></name>
</person-group>. 
<article-title>Cybersecurity in the generative artificial intelligence era</article-title>. <source>Asia Pacif J Ophthalmol (Philadelphia Pa)</source>. (<year>2024</year>) <volume>13</volume>:<elocation-id>100091</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.apjo.2024.100091</pub-id>, PMID: <pub-id pub-id-type="pmid">39209217</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<label>26</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Waqas</surname> <given-names>A</given-names></name>
<name><surname>Bui</surname> <given-names>MM</given-names></name>
<name><surname>Glassy</surname> <given-names>EF</given-names></name>
<name><surname>El Naqa</surname> <given-names>I</given-names></name>
<name><surname>Borkowski</surname> <given-names>P</given-names></name>
<name><surname>Borkowski</surname> <given-names>AA</given-names></name>
<etal/>
</person-group>. 
<article-title>Revolutionizing digital pathology with the power of generative artificial intelligence and foundation models</article-title>. <source>Lab Investigat J Tech Methods Pathol</source>. (<year>2023</year>) <volume>103</volume>:<elocation-id>100255</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.labinv.2023.100255</pub-id>, PMID: <pub-id pub-id-type="pmid">37757969</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<label>27</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Handelman</surname> <given-names>GS</given-names></name>
<name><surname>Kok</surname> <given-names>HK</given-names></name>
<name><surname>Chandra</surname> <given-names>RV</given-names></name>
<name><surname>Razavi</surname> <given-names>AH</given-names></name>
<name><surname>Lee</surname> <given-names>MJ</given-names></name>
<name><surname>Asadi</surname> <given-names>H</given-names></name>
</person-group>. 
<article-title>eDoctor: machine learning and the future of medicine</article-title>. <source>J Internal Med</source>. (<year>2018</year>) <volume>284</volume>:<page-range>603&#x2013;19</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1111/joim.12822</pub-id>, PMID: <pub-id pub-id-type="pmid">30102808</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<label>28</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Do</surname> <given-names>S</given-names></name>
<name><surname>Song</surname> <given-names>KD</given-names></name>
<name><surname>Chung</surname> <given-names>JW</given-names></name>
</person-group>. 
<article-title>Basics of deep learning: A radiologist&#x2019;s guide to understanding published radiology articles on deep learning</article-title>. <source>Korean J Radiol</source>. (<year>2020</year>) <volume>21</volume>:<fpage>33</fpage>&#x2013;<lpage>41</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3348/kjr.2019.0312</pub-id>, PMID: <pub-id pub-id-type="pmid">31920027</pub-id>
</mixed-citation>
</ref>
<ref id="B29">
<label>29</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Litjens</surname> <given-names>G</given-names></name>
<name><surname>Kooi</surname> <given-names>T</given-names></name>
<name><surname>Bejnordi</surname> <given-names>BE</given-names></name>
<name><surname>Setio</surname> <given-names>AAA</given-names></name>
<name><surname>Ciompi</surname> <given-names>F</given-names></name>
<name><surname>Ghafoorian</surname> <given-names>M</given-names></name>
<etal/>
</person-group>. 
<article-title>A survey on deep learning in medical image analysis</article-title>. <source>Med Image Anal</source>. (<year>2017</year>) <volume>42</volume>:<fpage>60</fpage>&#x2013;<lpage>88</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.media.2017.07.005</pub-id>, PMID: <pub-id pub-id-type="pmid">28778026</pub-id>
</mixed-citation>
</ref>
<ref id="B30">
<label>30</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Brattain</surname> <given-names>LJ</given-names></name>
<name><surname>Telfer</surname> <given-names>BA</given-names></name>
<name><surname>Dhyani</surname> <given-names>M</given-names></name>
<name><surname>Grajo</surname> <given-names>JR</given-names></name>
<name><surname>Samir</surname> <given-names>AE</given-names></name>
</person-group>. 
<article-title>Machine learning for medical ultrasound: status, methods, and future opportunities</article-title>. <source>Abdominal Radiol (New York)</source>. (<year>2018</year>) <volume>43</volume>:<page-range>786&#x2013;99</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00261-018-1517-0</pub-id>, PMID: <pub-id pub-id-type="pmid">29492605</pub-id>
</mixed-citation>
</ref>
<ref id="B31">
<label>31</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Hubbard</surname> <given-names>AE</given-names></name>
<name><surname>Kherad-Pajouh</surname> <given-names>S</given-names></name>
<name><surname>van der Laan</surname> <given-names>MJ</given-names></name>
</person-group>. 
<article-title>Statistical inference for data adaptive target parameters</article-title>. <source>Int J Biostat</source>. (<year>2016</year>) <volume>12</volume>:<fpage>3</fpage>&#x2013;<lpage>19</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1515/ijb-2015-0013</pub-id>, PMID: <pub-id pub-id-type="pmid">27227715</pub-id>
</mixed-citation>
</ref>
<ref id="B32">
<label>32</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Choi</surname> <given-names>RY</given-names></name>
<name><surname>Coyner</surname> <given-names>AS</given-names></name>
<name><surname>Kalpathy-Cramer</surname> <given-names>J</given-names></name>
<name><surname>Chiang</surname> <given-names>MF</given-names></name>
<name><surname>Campbell</surname> <given-names>JP</given-names></name>
</person-group>. 
<article-title>Introduction to machine learning, neural networks, and deep learning</article-title>. <source>Trans Vision Sci Technol</source>. (<year>2020</year>) <volume>9</volume>:<elocation-id>14</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1167/tvst.9.2.14</pub-id>, PMID: <pub-id pub-id-type="pmid">32704420</pub-id>
</mixed-citation>
</ref>
<ref id="B33">
<label>33</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wan</surname> <given-names>KW</given-names></name>
<name><surname>Wong</surname> <given-names>CH</given-names></name>
<name><surname>Ip</surname> <given-names>HF</given-names></name>
<name><surname>Fan</surname> <given-names>D</given-names></name>
<name><surname>Yuen</surname> <given-names>PL</given-names></name>
<name><surname>Fong</surname> <given-names>HY</given-names></name>
<etal/>
</person-group>. 
<article-title>Evaluation of the performance of traditional machine learning algorithms, convolutional neural network and AutoML Vision in ultrasound breast lesions classification: a comparative study</article-title>. <source>Quant Imaging Med Surg</source>. (<year>2021</year>) <volume>11</volume>:<page-range>1381&#x2013;93</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.21037/qims-20-922</pub-id>, PMID: <pub-id pub-id-type="pmid">33816176</pub-id>
</mixed-citation>
</ref>
<ref id="B34">
<label>34</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Pehrson</surname> <given-names>LM</given-names></name>
<name><surname>Lauridsen</surname> <given-names>C</given-names></name>
<name><surname>Nielsen</surname> <given-names>MB</given-names></name>
</person-group>. 
<article-title>Machine learning and deep learning applied in ultrasound</article-title>. <source>Ultraschall der Med (Stuttgart Germany: 1980)</source>. (<year>2018</year>) <volume>39</volume>:<page-range>379&#x2013;81</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1055/a-0642-9545</pub-id>, PMID: <pub-id pub-id-type="pmid">30071556</pub-id>
</mixed-citation>
</ref>
<ref id="B35">
<label>35</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Mousavi</surname> <given-names>SM</given-names></name>
<name><surname>Beroza</surname> <given-names>GC</given-names></name>
</person-group>. 
<article-title>Deep-learning seismology</article-title>. <source>Sci (New York NY)</source>. (<year>2022</year>) <volume>377</volume>:<fpage>eabm4470</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1126/science.abm4470</pub-id>, PMID: <pub-id pub-id-type="pmid">35951699</pub-id>
</mixed-citation>
</ref>
<ref id="B36">
<label>36</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Kriegeskorte</surname> <given-names>N</given-names></name>
<name><surname>Golan</surname> <given-names>T</given-names></name>
</person-group>. 
<article-title>Neural network models and deep learning</article-title>. <source>Curr Biol: CB</source>. (<year>2019</year>) <volume>29</volume>:<page-range>R231&#x2013;R6</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.cub.2019.02.034</pub-id>, PMID: <pub-id pub-id-type="pmid">30939301</pub-id>
</mixed-citation>
</ref>
<ref id="B37">
<label>37</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>LeCun</surname> <given-names>Y</given-names></name>
<name><surname>Bengio</surname> <given-names>Y</given-names></name>
<name><surname>Hinton</surname> <given-names>G</given-names></name>
</person-group>. 
<article-title>Deep learning</article-title>. <source>Nature</source>. (<year>2015</year>) <volume>521</volume>:<page-range>436&#x2013;44</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/nature14539</pub-id>, PMID: <pub-id pub-id-type="pmid">26017442</pub-id>
</mixed-citation>
</ref>
<ref id="B38">
<label>38</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Miotto</surname> <given-names>R</given-names></name>
<name><surname>Wang</surname> <given-names>F</given-names></name>
<name><surname>Wang</surname> <given-names>S</given-names></name>
<name><surname>Jiang</surname> <given-names>X</given-names></name>
<name><surname>Dudley</surname> <given-names>JT</given-names></name>
</person-group>. 
<article-title>Deep learning for healthcare: review, opportunities and challenges</article-title>. <source>Briefings Bioinf</source>. (<year>2018</year>) <volume>19</volume>:<page-range>1236&#x2013;46</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1093/bib/bbx044</pub-id>, PMID: <pub-id pub-id-type="pmid">28481991</pub-id>
</mixed-citation>
</ref>
<ref id="B39">
<label>39</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Georgevici</surname> <given-names>AI</given-names></name>
<name><surname>Terblanche</surname> <given-names>M</given-names></name>
</person-group>. 
<article-title>Neural networks and deep learning: a brief introduction</article-title>. <source>Intensive Care Med</source>. (<year>2019</year>) <volume>45</volume>:<page-range>712&#x2013;4</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00134-019-05537-w</pub-id>, PMID: <pub-id pub-id-type="pmid">30725133</pub-id>
</mixed-citation>
</ref>
<ref id="B40">
<label>40</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Lee</surname> <given-names>C</given-names></name>
<name><surname>Kim</surname> <given-names>Y</given-names></name>
<name><surname>Kim</surname> <given-names>YS</given-names></name>
<name><surname>Jang</surname> <given-names>J</given-names></name>
</person-group>. 
<article-title>Automatic disease annotation from radiology reports using artificial intelligence implemented by a recurrent neural network</article-title>. <source>AJR Am J Roentgenol</source>. (<year>2019</year>) <volume>212</volume>:<page-range>734&#x2013;40</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.2214/ajr.18.19869</pub-id>, PMID: <pub-id pub-id-type="pmid">30699011</pub-id>
</mixed-citation>
</ref>
<ref id="B41">
<label>41</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Alam</surname> <given-names>MS</given-names></name>
<name><surname>Wang</surname> <given-names>D</given-names></name>
<name><surname>Sowmya</surname> <given-names>A</given-names></name>
</person-group>. 
<article-title>AMFP-net: Adaptive multi-scale feature pyramid network for diagnosis of pneumoconiosis from chest X-ray images</article-title>. <source>Artif Intell Med</source>. (<year>2024</year>) <volume>154</volume>:<elocation-id>102917</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.artmed.2024.102917</pub-id>, PMID: <pub-id pub-id-type="pmid">38917599</pub-id>
</mixed-citation>
</ref>
<ref id="B42">
<label>42</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Parsa</surname> <given-names>M</given-names></name>
<name><surname>Rad</surname> <given-names>HY</given-names></name>
<name><surname>Vaezi</surname> <given-names>H</given-names></name>
<name><surname>Hossein-Zadeh</surname> <given-names>GA</given-names></name>
<name><surname>Setarehdan</surname> <given-names>SK</given-names></name>
<name><surname>Rostami</surname> <given-names>R</given-names></name>
<etal/>
</person-group>. 
<article-title>EEG-based classification of individuals with neuropsychiatric disorders using deep neural networks: A systematic review of current status and future directions</article-title>. <source>Comput Methods Programs Biomed</source>. (<year>2023</year>) <volume>240</volume>:<elocation-id>107683</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.cmpb.2023.107683</pub-id>, PMID: <pub-id pub-id-type="pmid">37406421</pub-id>
</mixed-citation>
</ref>
<ref id="B43">
<label>43</label>
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name><surname>Wenzel</surname> <given-names>M</given-names></name>
</person-group>. 
<article-title>Generative adversarial networks and other generative models</article-title>. In: 
<person-group person-group-type="editor">
<name><surname>Colliot</surname> <given-names>O</given-names></name>
</person-group>, editor. <source>Machine Learning for Brain Disorders</source>. 
<publisher-name>Humana</publisher-name>, <publisher-loc>New York, NY</publisher-loc> (<year>2023</year>). p. <page-range>139&#x2013;92</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/978-1-0716-3195-9_5</pub-id>, PMID: <pub-id pub-id-type="pmid">37988513</pub-id>
</mixed-citation>
</ref>
<ref id="B44">
<label>44</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Dreyer</surname> <given-names>KJ</given-names></name>
<name><surname>Geis</surname> <given-names>JR</given-names></name>
</person-group>. 
<article-title>When machines think: radiology&#x2019;s next frontier</article-title>. <source>Radiology</source>. (<year>2017</year>) <volume>285</volume>:<page-range>713&#x2013;8</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1148/radiol.2017171183</pub-id>, PMID: <pub-id pub-id-type="pmid">29155639</pub-id>
</mixed-citation>
</ref>
<ref id="B45">
<label>45</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Avanzo</surname> <given-names>M</given-names></name>
<name><surname>Wei</surname> <given-names>L</given-names></name>
<name><surname>Stancanello</surname> <given-names>J</given-names></name>
<name><surname>Valli&#xe8;res</surname> <given-names>M</given-names></name>
<name><surname>Rao</surname> <given-names>A</given-names></name>
<name><surname>Morin</surname> <given-names>O</given-names></name>
<etal/>
</person-group>. 
<article-title>Machine and deep learning methods for radiomics</article-title>. <source>Med Phys</source>. (<year>2020</year>) <volume>47</volume>:<page-range>e185&#x2013;202</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/mp.13678</pub-id>, PMID: <pub-id pub-id-type="pmid">32418336</pub-id>
</mixed-citation>
</ref>
<ref id="B46">
<label>46</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Luijten</surname> <given-names>B</given-names></name>
<name><surname>Cohen</surname> <given-names>R</given-names></name>
<name><surname>de Bruijn</surname> <given-names>FJ</given-names></name>
<name><surname>Schmeitz</surname> <given-names>HAW</given-names></name>
<name><surname>Mischi</surname> <given-names>M</given-names></name>
<name><surname>Eldar</surname> <given-names>YC</given-names></name>
<etal/>
</person-group>. 
<article-title>Adaptive ultrasound beamforming using deep learning</article-title>. <source>IEEE Trans Med Imaging</source>. (<year>2020</year>) <volume>39</volume>:<page-range>3967&#x2013;78</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/tmi.2020.3008537</pub-id>, PMID: <pub-id pub-id-type="pmid">32746139</pub-id>
</mixed-citation>
</ref>
<ref id="B47">
<label>47</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Liu</surname> <given-names>J</given-names></name>
<name><surname>Li</surname> <given-names>C</given-names></name>
<name><surname>Liu</surname> <given-names>L</given-names></name>
<name><surname>Chen</surname> <given-names>H</given-names></name>
<name><surname>Han</surname> <given-names>H</given-names></name>
<name><surname>Zhang</surname> <given-names>B</given-names></name>
<etal/>
</person-group>. 
<article-title>Speckle noise reduction for medical ultrasound images based on cycle-consistent generative adversarial network</article-title>. <source>Biomed Signal Process Control</source>. (<year>2023</year>) <volume>86</volume>.
</mixed-citation>
</ref>
<ref id="B48">
<label>48</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wang</surname> <given-names>Y</given-names></name>
<name><surname>Ge</surname> <given-names>X</given-names></name>
<name><surname>Ma</surname> <given-names>H</given-names></name>
<name><surname>Qi</surname> <given-names>S</given-names></name>
<name><surname>Zhang</surname> <given-names>G</given-names></name>
<name><surname>Yao</surname> <given-names>Y</given-names></name>
</person-group>. 
<article-title>Deep learning in medical ultrasound image analysis: A review</article-title>. <source>IEEE Access</source>. (<year>2021</year>) <volume>9</volume>:<page-range>54310&#x2013;24</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/ACCESS.2021.3071301</pub-id>
</mixed-citation>
</ref>
<ref id="B49">
<label>49</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Sudharson</surname> <given-names>S</given-names></name>
<name><surname>Kokil</surname> <given-names>P</given-names></name>
</person-group>. 
<article-title>Computer-aided diagnosis system for the classification of multi-class kidney abnormalities in the noisy ultrasound images</article-title>. <source>Comput Methods Programs Biomed</source>. (<year>2021</year>) <volume>205</volume>:<elocation-id>106071</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.cmpb.2021.106071</pub-id>, PMID: <pub-id pub-id-type="pmid">33887632</pub-id>
</mixed-citation>
</ref>
<ref id="B50">
<label>50</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Becker</surname> <given-names>AS</given-names></name>
<name><surname>Mueller</surname> <given-names>M</given-names></name>
<name><surname>Stoffel</surname> <given-names>E</given-names></name>
<name><surname>Marcon</surname> <given-names>M</given-names></name>
<name><surname>Ghafoor</surname> <given-names>S</given-names></name>
<name><surname>Boss</surname> <given-names>A</given-names></name>
</person-group>. 
<article-title>Classification of breast cancer in ultrasound imaging using a generic deep learning analysis software: a pilot study</article-title>. <source>Br J Radiol</source>. (<year>2018</year>) <volume>91</volume>:<elocation-id>20170576</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1259/bjr.20170576</pub-id>, PMID: <pub-id pub-id-type="pmid">29215311</pub-id>
</mixed-citation>
</ref>
<ref id="B51">
<label>51</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Qian</surname> <given-names>X</given-names></name>
<name><surname>Zhang</surname> <given-names>B</given-names></name>
<name><surname>Liu</surname> <given-names>S</given-names></name>
<name><surname>Wang</surname> <given-names>Y</given-names></name>
<name><surname>Chen</surname> <given-names>X</given-names></name>
<name><surname>Liu</surname> <given-names>J</given-names></name>
<etal/>
</person-group>. 
<article-title>A combined ultrasonic B-mode and color Doppler system for the classification of breast masses using neural network</article-title>. <source>Eur Radiol</source>. (<year>2020</year>) <volume>30</volume>:<page-range>3023&#x2013;33</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00330-019-06610-0</pub-id>, PMID: <pub-id pub-id-type="pmid">32006174</pub-id>
</mixed-citation>
</ref>
<ref id="B52">
<label>52</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Shin</surname> <given-names>Y</given-names></name>
<name><surname>Lowerison</surname> <given-names>MR</given-names></name>
<name><surname>Wang</surname> <given-names>Y</given-names></name>
<name><surname>Chen</surname> <given-names>X</given-names></name>
<name><surname>You</surname> <given-names>Q</given-names></name>
<name><surname>Dong</surname> <given-names>Z</given-names></name>
<etal/>
</person-group>. 
<article-title>Context-aware deep learning enables high-efficacy localization of high concentration microbubbles for super-resolution ultrasound localization microscopy</article-title>. <source>Nat Commun</source>. (<year>2024</year>) <volume>15</volume>:<fpage>2932</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41467-024-47154-2</pub-id>, PMID: <pub-id pub-id-type="pmid">38575577</pub-id>
</mixed-citation>
</ref>
<ref id="B53">
<label>53</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Dai</surname> <given-names>F</given-names></name>
<name><surname>Li</surname> <given-names>Y</given-names></name>
<name><surname>Zhu</surname> <given-names>Y</given-names></name>
<name><surname>Li</surname> <given-names>B</given-names></name>
<name><surname>Shi</surname> <given-names>Q</given-names></name>
<name><surname>Chen</surname> <given-names>Y</given-names></name>
<etal/>
</person-group>. 
<article-title>B-mode ultrasound to elastography synthesis using multiscale learning</article-title>. <source>Ultrasonics</source>. (<year>2024</year>) <volume>138</volume>:<elocation-id>107268</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ultras.2024.107268</pub-id>, PMID: <pub-id pub-id-type="pmid">38402836</pub-id>
</mixed-citation>
</ref>
<ref id="B54">
<label>54</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Qian</surname> <given-names>L</given-names></name>
<name><surname>Lv</surname> <given-names>Z</given-names></name>
<name><surname>Zhang</surname> <given-names>K</given-names></name>
<name><surname>Wang</surname> <given-names>K</given-names></name>
<name><surname>Zhu</surname> <given-names>Q</given-names></name>
<name><surname>Zhou</surname> <given-names>S</given-names></name>
<etal/>
</person-group>. 
<article-title>Application of deep learning to predict underestimation in ductal carcinoma <italic>in situ</italic> of the breast with ultrasound</article-title>. <source>Ann Transl Med</source>. (<year>2021</year>) <volume>9</volume>:<fpage>295</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.21037/atm-20-3981</pub-id>, PMID: <pub-id pub-id-type="pmid">33708922</pub-id>
</mixed-citation>
</ref>
<ref id="B55">
<label>55</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Hahnloser</surname> <given-names>RH</given-names></name>
<name><surname>Sarpeshkar</surname> <given-names>R</given-names></name>
<name><surname>Mahowald</surname> <given-names>MA</given-names></name>
<name><surname>Douglas</surname> <given-names>RJ</given-names></name>
<name><surname>Seung</surname> <given-names>HS</given-names></name>
</person-group>. 
<article-title>Digital selection and analogue amplification coexist in a cortex-inspired silicon circuit</article-title>. <source>Nature</source>. (<year>2000</year>) <volume>405</volume>:<page-range>947&#x2013;51</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/35016072</pub-id>, PMID: <pub-id pub-id-type="pmid">10879535</pub-id>
</mixed-citation>
</ref>
<ref id="B56">
<label>56</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Kaur</surname> <given-names>G</given-names></name>
<name><surname>Sinha</surname> <given-names>R</given-names></name>
<name><surname>Tiwari</surname> <given-names>PK</given-names></name>
<name><surname>Yadav</surname> <given-names>SK</given-names></name>
<name><surname>Pandey</surname> <given-names>P</given-names></name>
<name><surname>Raj</surname> <given-names>R</given-names></name>
<etal/>
</person-group>. 
<article-title>Face mask recognition system using CNN model</article-title>. <source>Neurosci Inform</source>. (<year>2022</year>) <volume>2</volume>:<elocation-id>100035</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.neuri.2021.100035</pub-id>, PMID: <pub-id pub-id-type="pmid">36819833</pub-id>
</mixed-citation>
</ref>
<ref id="B57">
<label>57</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Chartrand</surname> <given-names>G</given-names></name>
<name><surname>Cheng</surname> <given-names>PM</given-names></name>
<name><surname>Vorontsov</surname> <given-names>E</given-names></name>
<name><surname>Drozdzal</surname> <given-names>M</given-names></name>
<name><surname>Turcotte</surname> <given-names>S</given-names></name>
<name><surname>Pal</surname> <given-names>CJ</given-names></name>
<etal/>
</person-group>. 
<article-title>Deep learning: A primer for radiologists</article-title>. <source>Radiographics</source>. (<year>2017</year>) <volume>37</volume>:<page-range>2113&#x2013;31</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1148/rg.2017170077</pub-id>, PMID: <pub-id pub-id-type="pmid">29131760</pub-id>
</mixed-citation>
</ref>
<ref id="B58">
<label>58</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Singh</surname> <given-names>D</given-names></name>
<name><surname>Kumar</surname> <given-names>V</given-names></name>
<name><surname>Kaur</surname> <given-names>M</given-names></name>
</person-group>. 
<article-title>Densely connected convolutional networks-based COVID-19 screening model</article-title>. <source>Appl Intell (Dordrecht Netherlands)</source>. (<year>2021</year>) <volume>51</volume>:<page-range>3044&#x2013;51</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s10489-020-02149-6</pub-id>, PMID: <pub-id pub-id-type="pmid">34764584</pub-id>
</mixed-citation>
</ref>
<ref id="B59">
<label>59</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Cao</surname> <given-names>Z</given-names></name>
<name><surname>Duan</surname> <given-names>L</given-names></name>
<name><surname>Yang</surname> <given-names>G</given-names></name>
<name><surname>Yue</surname> <given-names>T</given-names></name>
<name><surname>Chen</surname> <given-names>Q</given-names></name>
</person-group>. 
<article-title>An experimental study on breast lesion detection and classification from ultrasound images using deep learning architectures</article-title>. <source>BMC Med Imaging</source>. (<year>2019</year>) <volume>19</volume>:<fpage>51</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s12880-019-0349-x</pub-id>, PMID: <pub-id pub-id-type="pmid">31262255</pub-id>
</mixed-citation>
</ref>
<ref id="B60">
<label>60</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Balasubramaniam</surname> <given-names>S</given-names></name>
<name><surname>Velmurugan</surname> <given-names>Y</given-names></name>
<name><surname>Jaganathan</surname> <given-names>D</given-names></name>
<name><surname>Dhanasekaran</surname> <given-names>S</given-names></name>
</person-group>. 
<article-title>A modified LeNet CNN for breast cancer diagnosis in ultrasound images</article-title>. <source>Diagnostics</source>. (<year>2023</year>) <volume>13</volume>:<elocation-id>2746</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/diagnostics13172746</pub-id>, PMID: <pub-id pub-id-type="pmid">37685284</pub-id>
</mixed-citation>
</ref>
<ref id="B61">
<label>61</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>AlZoubi</surname> <given-names>A</given-names></name>
<name><surname>Lu</surname> <given-names>F</given-names></name>
<name><surname>Zhu</surname> <given-names>Y</given-names></name>
<name><surname>Ying</surname> <given-names>T</given-names></name>
<name><surname>Ahmed</surname> <given-names>M</given-names></name>
<name><surname>Du</surname> <given-names>H</given-names></name>
</person-group>. 
<article-title>Classification of breast lesions in ultrasound images using deep convolutional neural networks: transfer learning versus automatic architecture design</article-title>. <source>Med Biol Eng Comput</source>. (<year>2024</year>) <volume>62</volume>:<page-range>135&#x2013;49</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11517-023-02922-y</pub-id>, PMID: <pub-id pub-id-type="pmid">37735296</pub-id>
</mixed-citation>
</ref>
<ref id="B62">
<label>62</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhao</surname> <given-names>G</given-names></name>
<name><surname>Kong</surname> <given-names>D</given-names></name>
<name><surname>Xu</surname> <given-names>X</given-names></name>
<name><surname>Hu</surname> <given-names>S</given-names></name>
<name><surname>Li</surname> <given-names>Z</given-names></name>
<name><surname>Tian</surname> <given-names>J</given-names></name>
</person-group>. 
<article-title>Deep learning-based classification of breast lesions using dynamic ultrasound video</article-title>. <source>Eur J Radiol</source>. (<year>2023</year>) <volume>165</volume>:<elocation-id>110885</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ejrad.2023.110885</pub-id>, PMID: <pub-id pub-id-type="pmid">37290361</pub-id>
</mixed-citation>
</ref>
<ref id="B63">
<label>63</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Guo</surname> <given-names>DH</given-names></name>
<name><surname>Lu</surname> <given-names>CY</given-names></name>
<name><surname>Chen</surname> <given-names>DL</given-names></name>
<name><surname>Yuan</surname> <given-names>JZ</given-names></name>
<name><surname>Duan</surname> <given-names>QM</given-names></name>
<name><surname>Xue</surname> <given-names>Z</given-names></name>
<etal/>
</person-group>. 
<article-title>A multimodal breast cancer diagnosis method based on Knowledge-Augmented Deep Learning</article-title>. <source>Biomed Signal Process Control</source>. (<year>2024</year>) <volume>90</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.bspc.2023.105843</pub-id>
</mixed-citation>
</ref>
<ref id="B64">
<label>64</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Nardone</surname> <given-names>V</given-names></name>
<name><surname>Reginelli</surname> <given-names>A</given-names></name>
<name><surname>Grassi</surname> <given-names>R</given-names></name>
<name><surname>Boldrini</surname> <given-names>L</given-names></name>
<name><surname>Vacca</surname> <given-names>G</given-names></name>
<name><surname>D&#x2019;Ippolito</surname> <given-names>E</given-names></name>
<etal/>
</person-group>. 
<article-title>Delta radiomics: a systematic review</article-title>. <source>Radiol Med</source>. (<year>2021</year>) <volume>126</volume>:<page-range>1571&#x2013;83</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11547-021-01436-7</pub-id>, PMID: <pub-id pub-id-type="pmid">34865190</pub-id>
</mixed-citation>
</ref>
<ref id="B65">
<label>65</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Gillies</surname> <given-names>RJ</given-names></name>
<name><surname>Kinahan</surname> <given-names>PE</given-names></name>
<name><surname>Hricak</surname> <given-names>H</given-names></name>
</person-group>. 
<article-title>Radiomics: images are more than pictures, they are data</article-title>. <source>Radiology</source>. (<year>2016</year>) <volume>278</volume>:<page-range>563&#x2013;77</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1148/radiol.2015151169</pub-id>, PMID: <pub-id pub-id-type="pmid">26579733</pub-id>
</mixed-citation>
</ref>
<ref id="B66">
<label>66</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Lambin</surname> <given-names>P</given-names></name>
<name><surname>Leijenaar</surname> <given-names>RTH</given-names></name>
<name><surname>Deist</surname> <given-names>TM</given-names></name>
<name><surname>Peerlings</surname> <given-names>J</given-names></name>
<name><surname>de Jong</surname> <given-names>EEC</given-names></name>
<name><surname>van Timmeren</surname> <given-names>J</given-names></name>
<etal/>
</person-group>. 
<article-title>Radiomics: the bridge between medical imaging and personalized medicine</article-title>. <source>Nat Rev Clin Oncol</source>. (<year>2017</year>) <volume>14</volume>:<page-range>749&#x2013;62</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/nrclinonc.2017.141</pub-id>, PMID: <pub-id pub-id-type="pmid">28975929</pub-id>
</mixed-citation>
</ref>
<ref id="B67">
<label>67</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wang</surname> <given-names>C</given-names></name>
<name><surname>Zhu</surname> <given-names>X</given-names></name>
<name><surname>Hong</surname> <given-names>JC</given-names></name>
<name><surname>Zheng</surname> <given-names>D</given-names></name>
</person-group>. 
<article-title>Artificial intelligence in radiotherapy treatment planning: present and future</article-title>. <source>Technol Cancer Res Treat</source>. (<year>2019</year>) <volume>18</volume>:<elocation-id>1533033819873922</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1177/1533033819873922</pub-id>, PMID: <pub-id pub-id-type="pmid">31495281</pub-id>
</mixed-citation>
</ref>
<ref id="B68">
<label>68</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Jiang</surname> <given-names>M</given-names></name>
<name><surname>Li</surname> <given-names>CL</given-names></name>
<name><surname>Luo</surname> <given-names>XM</given-names></name>
<name><surname>Chuan</surname> <given-names>ZR</given-names></name>
<name><surname>Lv</surname> <given-names>WZ</given-names></name>
<name><surname>Li</surname> <given-names>X</given-names></name>
<etal/>
</person-group>. 
<article-title>Ultrasound-based deep learning radiomics in the assessment of pathological complete response to neoadjuvant chemotherapy in locally advanced breast cancer</article-title>. <source>Eur J Cancer (Oxford England: 1990)</source>. (<year>2021</year>) <volume>147</volume>:<fpage>95</fpage>&#x2013;<lpage>105</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ejca.2021.01.028</pub-id>, PMID: <pub-id pub-id-type="pmid">33639324</pub-id>
</mixed-citation>
</ref>
<ref id="B69">
<label>69</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Qi</surname> <given-names>YJ</given-names></name>
<name><surname>Su</surname> <given-names>GH</given-names></name>
<name><surname>You</surname> <given-names>C</given-names></name>
<name><surname>Zhang</surname> <given-names>X</given-names></name>
<name><surname>Xiao</surname> <given-names>Y</given-names></name>
<name><surname>Jiang</surname> <given-names>YZ</given-names></name>
<etal/>
</person-group>. 
<article-title>Radiomics in breast cancer: Current advances and future directions</article-title>. <source>Cell Rep Med</source>. (<year>2024</year>) <volume>5</volume>:<elocation-id>101719</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.xcrm.2024.101719</pub-id>, PMID: <pub-id pub-id-type="pmid">39293402</pub-id>
</mixed-citation>
</ref>
<ref id="B70">
<label>70</label>
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name><surname>Magny</surname> <given-names>SJ</given-names></name>
<name><surname>Shikhman</surname> <given-names>R</given-names></name>
<name><surname>Keppke</surname> <given-names>AL</given-names></name>
</person-group>. 
<article-title>Breast imaging reporting and data system</article-title>. In: <source>StatPearls</source>. 
<publisher-name>StatPearls Publishing LLC</publisher-name> (<year>2024</year>). Available online at: <uri xlink:href="https://www.ncbi.nlm.nih.gov/books/NBK459169/">https://www.ncbi.nlm.nih.gov/books/NBK459169/</uri>.
</mixed-citation>
</ref>
<ref id="B71">
<label>71</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Bi</surname> <given-names>WL</given-names></name>
<name><surname>Hosny</surname> <given-names>A</given-names></name>
<name><surname>Schabath</surname> <given-names>MB</given-names></name>
<name><surname>Giger</surname> <given-names>ML</given-names></name>
<name><surname>Birkbak</surname> <given-names>NJ</given-names></name>
<name><surname>Mehrtash</surname> <given-names>A</given-names></name>
<etal/>
</person-group>. 
<article-title>Artificial intelligence in cancer imaging: Clinical challenges and applications</article-title>. <source>CA: Cancer J Clin</source>. (<year>2019</year>) <volume>69</volume>:<page-range>127&#x2013;57</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.3322/caac.21552</pub-id>, PMID: <pub-id pub-id-type="pmid">30720861</pub-id>
</mixed-citation>
</ref>
<ref id="B72">
<label>72</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wu</surname> <given-names>H</given-names></name>
<name><surname>Wang</surname> <given-names>C</given-names></name>
<name><surname>An</surname> <given-names>Q</given-names></name>
<name><surname>Qu</surname> <given-names>X</given-names></name>
<name><surname>Wu</surname> <given-names>X</given-names></name>
<name><surname>Yan</surname> <given-names>Y</given-names></name>
</person-group>. 
<article-title>Comparing the accuracy of shear wave elastography and strain elastography in the diagnosis of breast tumors: A systematic review and meta-analysis</article-title>. <source>Medicine</source>. (<year>2022</year>) <volume>101</volume>:<fpage>e31526</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1097/MD.0000000000031526</pub-id>
</mixed-citation>
</ref>
<ref id="B73">
<label>73</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zheng</surname> <given-names>X</given-names></name>
<name><surname>Yao</surname> <given-names>Z</given-names></name>
<name><surname>Huang</surname> <given-names>Y</given-names></name>
<name><surname>Yu</surname> <given-names>Y</given-names></name>
<name><surname>Wang</surname> <given-names>Y</given-names></name>
<name><surname>Liu</surname> <given-names>Y</given-names></name>
<etal/>
</person-group>. 
<article-title>Deep learning radiomics can predict axillary lymph node status in early-stage breast cancer</article-title>. <source>Nat Commun</source>. (<year>2020</year>) <volume>11</volume>:<fpage>1236</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41467-020-15027-z</pub-id>, PMID: <pub-id pub-id-type="pmid">32144248</pub-id>
</mixed-citation>
</ref>
<ref id="B74">
<label>74</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Huang</surname> <given-names>Y</given-names></name>
<name><surname>Yao</surname> <given-names>Z</given-names></name>
<name><surname>Li</surname> <given-names>L</given-names></name>
<name><surname>Mao</surname> <given-names>R</given-names></name>
<name><surname>Huang</surname> <given-names>W</given-names></name>
<name><surname>Hu</surname> <given-names>Z</given-names></name>
<etal/>
</person-group>. 
<article-title>Deep learning radiopathomics based on preoperative US images and biopsy whole slide images can distinguish between luminal and non-luminal tumors in early-stage breast cancers</article-title>. <source>EBioMedicine</source>. (<year>2023</year>) <volume>94</volume>:<elocation-id>104706</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ebiom.2023.104706</pub-id>, PMID: <pub-id pub-id-type="pmid">37478528</pub-id>
</mixed-citation>
</ref>
<ref id="B75">
<label>75</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Liu</surname> <given-names>H</given-names></name>
<name><surname>Zou</surname> <given-names>L</given-names></name>
<name><surname>Xu</surname> <given-names>N</given-names></name>
<name><surname>Shen</surname> <given-names>H</given-names></name>
<name><surname>Zhang</surname> <given-names>Y</given-names></name>
<name><surname>Wan</surname> <given-names>P</given-names></name>
<etal/>
</person-group>. 
<article-title>Deep learning radiomics based prediction of axillary lymph node metastasis in breast cancer</article-title>. <source>NPJ Breast Cancer</source>. (<year>2024</year>) <volume>10</volume>:<fpage>22</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41523-024-00628-4</pub-id>, PMID: <pub-id pub-id-type="pmid">38472210</pub-id>
</mixed-citation>
</ref>
<ref id="B76">
<label>76</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Huang</surname> <given-names>Y</given-names></name>
<name><surname>Han</surname> <given-names>L</given-names></name>
<name><surname>Dou</surname> <given-names>H</given-names></name>
<name><surname>Luo</surname> <given-names>H</given-names></name>
<name><surname>Yuan</surname> <given-names>Z</given-names></name>
<name><surname>Liu</surname> <given-names>Q</given-names></name>
<etal/>
</person-group>. 
<article-title>Two-stage CNNs for computerized BI-RADS categorization in breast ultrasound images</article-title>. <source>Biomed Eng Online</source>. (<year>2019</year>) <volume>18</volume>:<elocation-id>8</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s12938-019-0626-5</pub-id>, PMID: <pub-id pub-id-type="pmid">30678680</pub-id>
</mixed-citation>
</ref>
<ref id="B77">
<label>77</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ciritsis</surname> <given-names>A</given-names></name>
<name><surname>Rossi</surname> <given-names>C</given-names></name>
<name><surname>Eberhard</surname> <given-names>M</given-names></name>
<name><surname>Marcon</surname> <given-names>M</given-names></name>
<name><surname>Becker</surname> <given-names>AS</given-names></name>
<name><surname>Boss</surname> <given-names>A</given-names></name>
</person-group>. 
<article-title>Automatic classification of ultrasound breast lesions using a deep convolutional neural network mimicking human decision-making</article-title>. <source>Eur Radiol</source>. (<year>2019</year>) <volume>29</volume>:<page-range>5458&#x2013;68</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00330-019-06118-7</pub-id>, PMID: <pub-id pub-id-type="pmid">30927100</pub-id>
</mixed-citation>
</ref>
<ref id="B78">
<label>78</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Li</surname> <given-names>G</given-names></name>
<name><surname>Tian</surname> <given-names>H</given-names></name>
<name><surname>Wu</surname> <given-names>H</given-names></name>
<name><surname>Huang</surname> <given-names>Z</given-names></name>
<name><surname>Yang</surname> <given-names>K</given-names></name>
<name><surname>Li</surname> <given-names>J</given-names></name>
<etal/>
</person-group>. 
<article-title>Artificial intelligence for non-mass breast lesions detection and classification on ultrasound images: a comparative study</article-title>. <source>BMC Med Inform Decis Mak</source>. (<year>2023</year>) <volume>23</volume>:<fpage>174</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s12911-023-02277-2</pub-id>, PMID: <pub-id pub-id-type="pmid">37667320</pub-id>
</mixed-citation>
</ref>
<ref id="B79">
<label>79</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Park</surname> <given-names>KW</given-names></name>
<name><surname>Park</surname> <given-names>S</given-names></name>
<name><surname>Shon</surname> <given-names>I</given-names></name>
<name><surname>Kim</surname> <given-names>MJ</given-names></name>
<name><surname>Han</surname> <given-names>BK</given-names></name>
<name><surname>Ko</surname> <given-names>EY</given-names></name>
<etal/>
</person-group>. 
<article-title>Non-mass lesions detected by breast US: stratification of cancer risk for clinical management</article-title>. <source>Eur Radiol</source>. (<year>2021</year>) <volume>31</volume>:<page-range>1693&#x2013;706</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00330-020-07168-y</pub-id>, PMID: <pub-id pub-id-type="pmid">32888070</pub-id>
</mixed-citation>
</ref>
<ref id="B80">
<label>80</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Neri</surname> <given-names>E</given-names></name>
<name><surname>Aghakhanyan</surname> <given-names>G</given-names></name>
<name><surname>Zerunian</surname> <given-names>M</given-names></name>
<name><surname>Gandolfo</surname> <given-names>N</given-names></name>
<name><surname>Grassi</surname> <given-names>R</given-names></name>
<name><surname>Miele</surname> <given-names>V</given-names></name>
<etal/>
</person-group>. 
<article-title>Explainable AI in radiology: a white paper of the Italian Society of Medical and Interventional Radiology</article-title>. <source>La Radiol Medica</source>. (<year>2023</year>) <volume>128</volume>:<page-range>755&#x2013;64</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11547-023-01634-5</pub-id>, PMID: <pub-id pub-id-type="pmid">37155000</pub-id>
</mixed-citation>
</ref>
<ref id="B81">
<label>81</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Sheu</surname> <given-names>RK</given-names></name>
<name><surname>Pardeshi</surname> <given-names>MS</given-names></name>
</person-group>. 
<article-title>A survey on medical explainable AI (XAI): recent progress, explainability approach, human interaction and scoring system</article-title>. <source>Sensors (Basel)</source>. (<year>2022</year>) <volume>22</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/s22208068</pub-id>, PMID: <pub-id pub-id-type="pmid">36298417</pub-id>
</mixed-citation>
</ref>
<ref id="B82">
<label>82</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Yan</surname> <given-names>M</given-names></name>
<name><surname>He</surname> <given-names>D</given-names></name>
<name><surname>Sun</surname> <given-names>Y</given-names></name>
<name><surname>Huang</surname> <given-names>L</given-names></name>
<name><surname>Cai</surname> <given-names>L</given-names></name>
<name><surname>Wang</surname> <given-names>C</given-names></name>
<etal/>
</person-group>. 
<article-title>Comparative analysis of nomogram and machine learning models for predicting axillary lymph node metastasis in early-stage breast cancer: A study on clinically and ultrasound-negative axillary cases across two centers</article-title>. <source>Ultrasound Med Biol</source>. (<year>2025</year>) <volume>51</volume>:<page-range>463&#x2013;74</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ultrasmedbio.2024.11.003</pub-id>, PMID: <pub-id pub-id-type="pmid">39627056</pub-id>
</mixed-citation>
</ref>
<ref id="B83">
<label>83</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Renfrew</surname> <given-names>DL</given-names></name>
<name><surname>Franken</surname> <given-names>EA</given-names> <suffix>Jr.</suffix></name>
<name><surname>Berbaum</surname> <given-names>KS</given-names></name>
<name><surname>Weigelt</surname> <given-names>FH</given-names></name>
<name><surname>Abu-Yousef</surname> <given-names>MM</given-names></name>
</person-group>. 
<article-title>Error in radiology: classification and lessons in 182 cases presented at a problem case conference</article-title>. <source>Radiology</source>. (<year>1992</year>) <volume>183</volume>:<page-range>145&#x2013;50</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1148/radiology.183.1.1549661</pub-id>, PMID: <pub-id pub-id-type="pmid">1549661</pub-id>
</mixed-citation>
</ref>
<ref id="B84">
<label>84</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Berbaum</surname> <given-names>KS</given-names></name>
<name><surname>Franken</surname> <given-names>EA</given-names> <suffix>Jr.</suffix></name>
<name><surname>Dorfman</surname> <given-names>DD</given-names></name>
<name><surname>Rooholamini</surname> <given-names>SA</given-names></name>
<name><surname>Kathol</surname> <given-names>MH</given-names></name>
<name><surname>Barloon</surname> <given-names>TJ</given-names></name>
<etal/>
</person-group>. 
<article-title>Satisfaction of search in diagnostic radiology</article-title>. <source>Invest Radiol</source>. (<year>1990</year>) <volume>25</volume>:<page-range>133&#x2013;40</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1097/00004424-199002000-00006</pub-id>, PMID: <pub-id pub-id-type="pmid">2312249</pub-id>
</mixed-citation>
</ref>
<ref id="B85">
<label>85</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Petrick</surname> <given-names>N</given-names></name>
<name><surname>Sahiner</surname> <given-names>B</given-names></name>
<name><surname>Armato</surname> <given-names>SG</given-names> <suffix>3rd</suffix></name>
<name><surname>Bert</surname> <given-names>A</given-names></name>
<name><surname>Correale</surname> <given-names>L</given-names></name>
<name><surname>Delsanto</surname> <given-names>S</given-names></name>
<etal/>
</person-group>. 
<article-title>Evaluation of computer-aided detection and diagnosis systems</article-title>. <source>Med Phys</source>. (<year>2013</year>) <volume>40</volume>:<fpage>087001</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1118/1.4816310</pub-id>, PMID: <pub-id pub-id-type="pmid">23927365</pub-id>
</mixed-citation>
</ref>
<ref id="B86">
<label>86</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Jiang</surname> <given-names>J</given-names></name>
<name><surname>Jiang</surname> <given-names>X</given-names></name>
<name><surname>Xu</surname> <given-names>L</given-names></name>
<name><surname>Zhang</surname> <given-names>Y</given-names></name>
<name><surname>Zheng</surname> <given-names>Y</given-names></name>
<name><surname>Kong</surname> <given-names>D</given-names></name>
</person-group>. 
<article-title>Noise-robustness test for ultrasound breast nodule neural network models as medical devices</article-title>. <source>Front Oncol</source>. (<year>2023</year>) <volume>13</volume>:<elocation-id>1177225</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fonc.2023.1177225</pub-id>, PMID: <pub-id pub-id-type="pmid">37427110</pub-id>
</mixed-citation>
</ref>
<ref id="B87">
<label>87</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Park</surname> <given-names>HJ</given-names></name>
<name><surname>Kim</surname> <given-names>SM</given-names></name>
<name><surname>La Yun</surname> <given-names>B</given-names></name>
<name><surname>Jang</surname> <given-names>M</given-names></name>
<name><surname>Kim</surname> <given-names>B</given-names></name>
<name><surname>Jang</surname> <given-names>JY</given-names></name>
<etal/>
</person-group>. 
<article-title>A computer-aided diagnosis system using artificial intelligence for the diagnosis and characterization of breast masses on ultrasound: Added value for the inexperienced breast radiologist</article-title>. <source>Medicine</source>. (<year>2019</year>) <volume>98</volume>:<fpage>e14146</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1097/md.0000000000014146</pub-id>, PMID: <pub-id pub-id-type="pmid">30653149</pub-id>
</mixed-citation>
</ref>
<ref id="B88">
<label>88</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Kim</surname> <given-names>K</given-names></name>
<name><surname>Song</surname> <given-names>MK</given-names></name>
<name><surname>Kim</surname> <given-names>EK</given-names></name>
<name><surname>Yoon</surname> <given-names>JH</given-names></name>
</person-group>. 
<article-title>Clinical application of S-Detect to breast masses on ultrasonography: a study evaluating the diagnostic performance and agreement with a dedicated breast radiologist</article-title>. <source>Ultrasonography (Seoul Korea)</source>. (<year>2017</year>) <volume>36</volume>:<fpage>3</fpage>&#x2013;<lpage>9</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.14366/usg.16012</pub-id>, PMID: <pub-id pub-id-type="pmid">27184656</pub-id>
</mixed-citation>
</ref>
<ref id="B89">
<label>89</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wang</surname> <given-names>Y</given-names></name>
<name><surname>Jiang</surname> <given-names>S</given-names></name>
<name><surname>Wang</surname> <given-names>H</given-names></name>
<name><surname>Guo</surname> <given-names>YH</given-names></name>
<name><surname>Liu</surname> <given-names>B</given-names></name>
<name><surname>Hou</surname> <given-names>Y</given-names></name>
<etal/>
</person-group>. 
<article-title>CAD algorithms for solid breast masses discrimination: evaluation of the accuracy and interobserver variability</article-title>. <source>Ultrasound Med Biol</source>. (<year>2010</year>) <volume>36</volume>:<page-range>1273&#x2013;81</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ultrasmedbio.2010.05.010</pub-id>, PMID: <pub-id pub-id-type="pmid">20691917</pub-id>
</mixed-citation>
</ref>
<ref id="B90">
<label>90</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Lee</surname> <given-names>YJ</given-names></name>
<name><surname>Choi</surname> <given-names>SY</given-names></name>
<name><surname>Kim</surname> <given-names>KS</given-names></name>
<name><surname>Yang</surname> <given-names>PS</given-names></name>
</person-group>. 
<article-title>Variability in observer performance between faculty members and residents using breast imaging reporting and data system (BI-RADS)-ultrasound, fifth edition (2013)</article-title>. <source>Iran J Radiol</source>. (<year>2016</year>) <volume>13</volume>:<fpage>e28281</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.5812/iranjradiol.28281</pub-id>, PMID: <pub-id pub-id-type="pmid">27853492</pub-id>
</mixed-citation>
</ref>
<ref id="B91">
<label>91</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Bartolotta</surname> <given-names>TV</given-names></name>
<name><surname>Orlando</surname> <given-names>A</given-names></name>
<name><surname>Cantisani</surname> <given-names>V</given-names></name>
<name><surname>Matranga</surname> <given-names>D</given-names></name>
<name><surname>Ienzi</surname> <given-names>R</given-names></name>
<name><surname>Cirino</surname> <given-names>A</given-names></name>
<etal/>
</person-group>. 
<article-title>Focal breast lesion characterization according to the BI-RADS US lexicon: role of a computer-aided decision-making support</article-title>. <source>La Radiol Medica</source>. (<year>2018</year>) <volume>123</volume>:<fpage>498</fpage>&#x2013;<lpage>506</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11547-018-0874-7</pub-id>, PMID: <pub-id pub-id-type="pmid">29569216</pub-id>
</mixed-citation>
</ref>
<ref id="B92">
<label>92</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Buchbinder</surname> <given-names>SS</given-names></name>
<name><surname>Leichter</surname> <given-names>IS</given-names></name>
<name><surname>Lederman</surname> <given-names>RB</given-names></name>
<name><surname>Novak</surname> <given-names>B</given-names></name>
<name><surname>Bamberger</surname> <given-names>PN</given-names></name>
<name><surname>Sklair-Levy</surname> <given-names>M</given-names></name>
<etal/>
</person-group>. 
<article-title>Computer-aided classification of BI-RADS category 3 breast lesions</article-title>. <source>Radiology</source>. (<year>2004</year>) <volume>230</volume>:<page-range>820&#x2013;3</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1148/radiol.2303030089</pub-id>, PMID: <pub-id pub-id-type="pmid">14739315</pub-id>
</mixed-citation>
</ref>
<ref id="B93">
<label>93</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>G&#xf3;mez-Flores</surname> <given-names>W</given-names></name>
<name><surname>Coelho de Albuquerque Pereira</surname> <given-names>W</given-names></name>
</person-group>. 
<article-title>A comparative study of pre-trained convolutional neural networks for semantic segmentation of breast tumors in ultrasound</article-title>. <source>Comput Biol Med</source>. (<year>2020</year>) <volume>126</volume>:<elocation-id>104036</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.compbiomed.2020.104036</pub-id>, PMID: <pub-id pub-id-type="pmid">33059238</pub-id>
</mixed-citation>
</ref>
<ref id="B94">
<label>94</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhao</surname> <given-names>Z</given-names></name>
<name><surname>Hou</surname> <given-names>S</given-names></name>
<name><surname>Li</surname> <given-names>S</given-names></name>
<name><surname>Sheng</surname> <given-names>D</given-names></name>
<name><surname>Liu</surname> <given-names>Q</given-names></name>
<name><surname>Chang</surname> <given-names>C</given-names></name>
<etal/>
</person-group>. 
<article-title>Application of deep learning to reduce the rate of malignancy among BI-RADS 4A breast lesions based on ultrasonography</article-title>. <source>Ultrasound Med Biol</source>. (<year>2022</year>) <volume>48</volume>:<page-range>2267&#x2013;75</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ultrasmedbio.2022.06.019</pub-id>, PMID: <pub-id pub-id-type="pmid">36055860</pub-id>
</mixed-citation>
</ref>
<ref id="B95">
<label>95</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Yang</surname> <given-names>L</given-names></name>
<name><surname>Ma</surname> <given-names>Z</given-names></name>
</person-group>. 
<article-title>Nomogram based on super-resolution ultrasound images outperforms in predicting benign and malignant breast lesions</article-title>. <source>Breast Cancer (Dove Med Press)</source>. (<year>2023</year>) <volume>15</volume>:<page-range>867&#x2013;78</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.2147/bctt.s435510</pub-id>, PMID: <pub-id pub-id-type="pmid">38074418</pub-id>
</mixed-citation>
</ref>
<ref id="B96">
<label>96</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Pesapane</surname> <given-names>F</given-names></name>
<name><surname>Codari</surname> <given-names>M</given-names></name>
<name><surname>Sardanelli</surname> <given-names>F</given-names></name>
</person-group>. 
<article-title>Artificial intelligence in medical imaging: threat or opportunity? Radiologists again at the forefront of innovation in medicine</article-title>. <source>Eur Radiol Exp</source>. (<year>2018</year>) <volume>2</volume>:<fpage>35</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s41747-018-0061-6</pub-id>, PMID: <pub-id pub-id-type="pmid">30353365</pub-id>
</mixed-citation>
</ref>
<ref id="B97">
<label>97</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Mango</surname> <given-names>VL</given-names></name>
<name><surname>Sun</surname> <given-names>M</given-names></name>
<name><surname>Wynn</surname> <given-names>RT</given-names></name>
<name><surname>Ha</surname> <given-names>R</given-names></name>
</person-group>. 
<article-title>Should we ignore, follow, or biopsy? Impact of artificial intelligence decision support on breast ultrasound lesion assessment</article-title>. <source>AJR Am J Roentgenol</source>. (<year>2020</year>) <volume>214</volume>:<page-range>1445&#x2013;52</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.2214/ajr.19.21872</pub-id>, PMID: <pub-id pub-id-type="pmid">32319794</pub-id>
</mixed-citation>
</ref>
<ref id="B98">
<label>98</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Browne</surname> <given-names>JL</given-names></name>
<name><surname>Pascual</surname> <given-names>M</given-names></name>
<name><surname>Perez</surname> <given-names>J</given-names></name>
<name><surname>Salazar</surname> <given-names>S</given-names></name>
<name><surname>Valero</surname> <given-names>B</given-names></name>
<name><surname>Rodriguez</surname> <given-names>I</given-names></name>
<etal/>
</person-group>. 
<article-title>AI: can it make a difference to the predictive value of ultrasound breast biopsy?</article-title> <source>Diagnostics (Basel)</source>. (<year>2023</year>) <volume>13</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/diagnostics13040811</pub-id>, PMID: <pub-id pub-id-type="pmid">36832299</pub-id>
</mixed-citation>
</ref>
<ref id="B99">
<label>99</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Cerdas</surname> <given-names>MG</given-names></name>
<name><surname>Farhat</surname> <given-names>J</given-names></name>
<name><surname>Elshafie</surname> <given-names>SI</given-names></name>
<name><surname>Mariyam</surname> <given-names>F</given-names></name>
<name><surname>James</surname> <given-names>L</given-names></name>
<name><surname>Qureshi</surname> <given-names>AK</given-names></name>
<etal/>
</person-group>. 
<article-title>Exploring the evolution of breast cancer imaging: A review of conventional and emerging modalities</article-title>. <source>Cureus</source>. (<year>2025</year>) <volume>17</volume>:<fpage>e82762</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.7759/cureus.82762</pub-id>, PMID: <pub-id pub-id-type="pmid">40416096</pub-id>
</mixed-citation>
</ref>
<ref id="B100">
<label>100</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Liu</surname> <given-names>Y</given-names></name>
<name><surname>Wang</surname> <given-names>Y</given-names></name>
<name><surname>Hu</surname> <given-names>X</given-names></name>
<name><surname>Wang</surname> <given-names>X</given-names></name>
<name><surname>Xue</surname> <given-names>L</given-names></name>
<name><surname>Pang</surname> <given-names>Q</given-names></name>
<etal/>
</person-group>. 
<article-title>Multimodality deep learning radiomics predicts pathological response after neoadjuvant chemoradiotherapy for esophageal squamous cell carcinoma</article-title>. <source>Insights Imaging</source>. (<year>2024</year>) <volume>15</volume>:<fpage>277</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s13244-024-01851-0</pub-id>, PMID: <pub-id pub-id-type="pmid">39546168</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2667497">Arvind Mukundan</ext-link>, National Chung Cheng University, Taiwan</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3271610">Riya Karmakar</ext-link>, National Chung Cheng University, Taiwan</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3154322">Wei-min Li</ext-link>, Affiliated Hospital of Jiangnan University, China</p></fn>
</fn-group>
</back>
</article>