<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="systematic-review" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Oncol.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Oncology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Oncol.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2234-943X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fonc.2025.1626286</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Systematic Review</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Artificial intelligence based on ultrasound for initial diagnosis of malignant ovarian cancer: a systematic review and meta-analysis</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Li</surname><given-names>Rong</given-names></name>
<uri xlink:href="https://loop.frontiersin.org/people/3063137/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Lei</surname><given-names>Jiehua</given-names></name>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Tang</surname><given-names>Xiaomei</given-names></name>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Zheng</surname><given-names>Shiying</given-names></name>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Qu</surname><given-names>Jiajia</given-names></name>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Xu</surname><given-names>Yueyue</given-names></name>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Zheng</surname><given-names>Hongyu</given-names></name>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><institution>Departments of Ultrasound, The People&#x2019;s Hospital of Guangxi Zhuang Autonomous Region</institution>, <city>Nanning</city>, <state>Guangxi</state>,&#xa0;<country country="CN">China</country></aff>
<author-notes>
<corresp id="c001"><label>*</label>Correspondence: Hongyu Zheng, <email xlink:href="mailto:zhenghongyu@hotmail.com">zhenghongyu@hotmail.com</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2025-12-01">
<day>01</day>
<month>12</month>
<year>2025</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2025</year>
</pub-date>
<volume>15</volume>
<elocation-id>1626286</elocation-id>
<history>
<date date-type="received">
<day>10</day>
<month>05</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>14</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>07</day>
<month>10</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2025 Li, Lei, Tang, Zheng, Qu, Xu and Zheng.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Li, Lei, Tang, Zheng, Qu, Xu and Zheng</copyright-holder>
<license>
<ali:license_ref start_date="2025-12-01">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Purpose</title>
<p>This meta-analysis aimed to evaluate the diagnostic performance of artificial intelligence (AI) in ultrasound imaging for the initial diagnosis of malignant ovarian cancer, comparing its performance to that of sonographers.</p>
</sec>
<sec>
<title>Methods</title>
<p>A systematic literature search was conducted in PubMed, Web of Science, Embase, and the Cochrane Library up to February 2025. Inclusion criteria targeted studies employing AI algorithms to analyze ultrasound images in patients with suspected ovarian cancer, using pathology as the reference standard. Bivariate random-effects models were utilized to aggregate sensitivity, specificity, and area under the curve (AUC). The methodological quality of the included studies was assessed using a modified version of the Quality Assessment of Diagnostic Accuracy Studies-2 (QUADAS-2) tool.</p>
</sec>
<sec>
<title>Results</title>
<p>Eighteen studies encompassing a total of 22,697 patients/images/lesions were analyzed. AI demonstrated a sensitivity of 0.95 (95% CI: 0.88-0.98) and specificity of 0.95 (95% CI: 0.89-0.98) in internal validation sets, yielding an AUC of 0.98. In external validation, sensitivity was 0.78 (95% CI: 0.56-0.91) and specificity was 0.88 (95% CI: 0.76-0.95), with an AUC of 0.91. In comparison, sonographers exhibited a sensitivity of 0.83 (95% CI: 0.62-0.94), specificity of 0.84 (95% CI: 0.79-0.88), and an AUC of 0.87. These results indicate that ultrasound-based AI significantly outperforms sonographer diagnostics. Meta-regression analysis indicated that the heterogeneity was primarily attributed to the analysis method (image-based vs. patient-based, specificity <italic>P</italic> = 0.01).</p>
</sec>
<sec>
<title>Conclusions</title>
<p>AI based on ultrasound diagnosis demonstrates excellent performance for malignant ovarian cancer detection, with potentially superior performance compared to sonographers. Despite high heterogeneity across studies and the observed publication bias, these results indicate the potential for AI integration into clinical practice. Further studies with external, multicenter prospective head-to-head design are still needed.</p>
</sec>
</abstract>
<kwd-group>
<kwd>artificial intelligence</kwd>
<kwd>ovarian neoplasms</kwd>
<kwd>ultrasonography</kwd>
<kwd>diagnosis</kwd>
<kwd>meta-analysis</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declare financial support was received for the research and/or publication of this article. This study was supported by the Guangxi Key Research and Development Plan (Guike AB23026042).</funding-statement>
</funding-group>
<counts>
<fig-count count="7"/>
<table-count count="4"/>
<equation-count count="0"/>
<ref-count count="35"/>
<page-count count="15"/>
<word-count count="6327"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Gynecological Oncology</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<title>Introduction</title>
<p>Ovarian cancer is the deadliest form of cancer affecting the female reproductive system. Its early clinical symptoms are often subtle, leading to diagnosis at advanced stages and resulting in poor prognosis (<xref ref-type="bibr" rid="B1">1</xref>, <xref ref-type="bibr" rid="B2">2</xref>). It is the leading cause of mortality among gynecological cancers, surpassing cervical and endometrial cancers in terms of lethality, with a 10-year survival rate of only 35% across all stages (<xref ref-type="bibr" rid="B3">3</xref>). The insidious nature of ovarian cancer presents a significant diagnostic challenge, as subtle or non-specific symptoms often result in delayed clinical presentations. Early and accurate diagnosis is pivotal to reducing mortality, improving treatment outcomes, and minimizing unnecessary surgical procedures for patients presenting with ovarian masses.</p>
<p>Conventional diagnostic modalities for ovarian cancer include imaging tools such as computed tomography (CT), magnetic resonance imaging (MRI), serum biomarkers like CA125 and HE4, pathological biopsy, and ultrasound. CT and MRI are common non-invasive methods, but their ability to fully analyze tumor microenvironments is limited because human vision has natural limitations (<xref ref-type="bibr" rid="B3">3</xref>, <xref ref-type="bibr" rid="B4">4</xref>). Moreover, variability in radiological interpretation often depends on the expertise of the operator, which adds a layer of subjectivity to diagnostic accuracy. Pathological biopsy, though definitive, is invasive and not always suitable for all patients (<xref ref-type="bibr" rid="B5">5</xref>, <xref ref-type="bibr" rid="B6">6</xref>). Serum biomarkers, despite being widely used, often lack specificity, as elevated levels can occur in conditions unrelated to ovarian malignancies (<xref ref-type="bibr" rid="B7">7</xref>). Ultrasound, particularly transvaginal ultrasound, has emerged as one of the most accessible and cost-effective tools for ovarian tumor evaluation (<xref ref-type="bibr" rid="B5">5</xref>). However, conventional ultrasound diagnostics primarily rely on morphological imaging and visual assessment, which are significantly influenced by operator-dependent variability. Quantitative data embedded within ultrasound images, which reportedly carry predictive advantages over traditional imaging metrics, often remain underexplored in clinical settings (<xref ref-type="bibr" rid="B1">1</xref>, <xref ref-type="bibr" rid="B8">8</xref>). Thus, while these diagnostic methods offer valuable insights, their limitations underscore the urgent need for innovative solutions to improve diagnostic precision and reliability.</p>
<p>Artificial intelligence (AI) has demonstrated remarkable potential in improving diagnostic performance, especially in the analysis of medical imaging such as ultrasound. By leveraging machine learning and deep learning technologies, AI can extract complex patterns from imaging data and provide quantitative assessments of radiographic features that are often imperceptible to the human eye (<xref ref-type="bibr" rid="B4">4</xref>, <xref ref-type="bibr" rid="B9">9</xref>). Recent studies have demonstrated that AI-assisted diagnostic performance in ovarian cancer can achieve sensitivity and specificity rates as high as 81% and 92%, which outperform traditional imaging-based diagnostics (<xref ref-type="bibr" rid="B1">1</xref>). AI-based models can enhance the interpretation of ultrasound images by systematically quantifying tumor characteristics such as lesion size, echotexture, and morphological irregularities (<xref ref-type="bibr" rid="B8">8</xref>). However, despite their promising potential, ultrasound-based AI applications face significant controversies. First, the generalizability of AI models needs further evaluation, particularly regarding their diagnostic performance in external validation sets (<xref ref-type="bibr" rid="B6">6</xref>). Second, the relative diagnostic performance of AI compared to sonographers also requires assessment (<xref ref-type="bibr" rid="B10">10</xref>). These challenges underscore the necessity for systematic evaluations to clarify the performance variability of AI-enabled ultrasound diagnostics for ovarian cancer.</p>
<p>Therefore, this meta-analysis aims to systematically assess the diagnostic performance of AI-based ultrasound models for the initial diagnosis of ovarian cancer, and compare its performance to that of sonographers.</p>
</sec>
<sec id="s2">
<title>Methods</title>
<p>The meta-analysis was conducted in strict accordance with the Preferred Reporting Items for Systematic Reviews and Meta-Analyses of Diagnostic Test Accuracy (PRISMA-DTA) guidelines (<xref ref-type="bibr" rid="B11">11</xref>).</p>
<sec id="s2_1">
<title>Search strategy</title>
<p>A systematic search was conducted across PubMed, Embase, Cochrane Library, and Web of Science up to February 2025. The search strategy integrated three conceptual domains: AI (e.g., &#x201c;deep learning,&#x201d; &#x201c;machine learning&#x201d;, &#x201c;artificial intelligence&#x201d;), ovarian cancer (e.g., &#x201c;Ovarian Neoplasms,&#x201d; &#x201c;Cancer of the Ovary&#x201d;), and ultrasound imaging (e.g., &#x201c;ultrasonography,&#x201d; &#x201c;Echography&#x201d;). Free terms and MeSH headings were combined for comprehensive coverage (<xref ref-type="supplementary-material" rid="SM1"><bold>Supplementary Table&#xa0;1</bold></xref>). In addition, reference lists of included studies were manually screened. In order to minimize selection bias, the search was updated in March 2025 to include newly published studies.</p>
</sec>
<sec id="s2_2">
<title>Inclusion and exclusion criteria</title>
<p>Inclusion criteria were established following the PICOS framework. Population (P): Patients suspected of ovarian malignancy or borderline ovarian tumors, which were classified as the malignant (positive) group in our study. Intervention (I): Application of AI algorithms to predict malignancy using ovarian ultrasound images, including transvaginal, transabdominal and transrectal approaches. Comparison (C): Pathological outcomes were the reference standard. Outcomes (O): Studies reporting diagnostic performance metrics, including sensitivity, specificity, and AUC were included. Study design (S): Retrospective or prospective studies published in peer-reviewed journals.</p>
<p>Exclusion criteria encompassed: (1) studies lacking sufficient data to calculate true positive (TP), false positive (FP), false negative (FN), and true negative (TN) values; (2) non-relevant publication types (e.g., case reports, conference abstracts, reviews, meta-analyses, or commentaries); (3) non-English literature; (4) studies utilizing non-ultrasound-based AI methodologies (e.g., CT or MRI); (5) studies employing non-pathological reference standards.</p>
</sec>
<sec id="s2_3">
<title>Quality assessment</title>
<p>The methodological quality of the included studies was assessed using a modified version of the Quality Assessment of Diagnostic Accuracy Studies-2 (QUADAS-2) tool (<xref ref-type="bibr" rid="B12">12</xref>). To enhance relevance, we integrated specific domain criteria from the Prediction model Risk of Bias Assessment Tool (PROBAST) to replace less applicable components of the original QUADAS-2 framework (<xref ref-type="bibr" rid="B13">13</xref>). The revised tool evaluated four domains: participant selection, index test (AI algorithm), reference standard, and analysis. Within each domain, risk of bias and concerns regarding applicability were systematically assessed. Two independent reviewers (X.T. and S.Z.) conducted the assessments using the modified QUADAS-2 tool. Disagreements were resolved through iterative discussions to ensure consensus.</p>
</sec>
<sec id="s2_4">
<title>Data extraction</title>
<p>Two independent reviewers (R.L. and J.L.) conducted preliminary screening of titles and abstracts from the remaining literature to assess potential eligibility. Discrepancies in evaluations were resolved through adjudication by a third reviewer (H.Z.). Data extraction items included: author, publication year, country, study design, type of ultrasound, reference standard, analysis, patients/lesions/images per set, number of malignant ovarian cancer patients/lesions/images, AI method, AI model, optimal AI algorithms, data splitting method, the diagnostic matrix for internal and external validation sets, sonographers, scanner modality (system), evaluation time, and frequency (MHz).</p>
<p>For studies included in the systematic review but lacking meta-analyzable data, the research team contacted corresponding authors via email to obtain missing information. As most studies did not report complete diagnostic contingency tables, two methods were employed to construct the contingency table: (1) back-calculating total cases, along with sensitivity, specificity, and the number of malignant ovarian cancer patients based on the reference standard; (2) determining optimal sensitivity and specificity parameters through receiver operating characteristic (ROC) curve analysis using Youden&#x2019;s index.</p>
</sec>
<sec id="s2_5">
<title>Outcome measures</title>
<p>The primary outcome measures included sensitivity, specificity, and AUC from internal validation sets, external validation sets, and sonographer. Sensitivity (also termed recall or true positive rate) quantified the probability of the AI model correctly identifying malignant cases (including both ovarian cancer and borderline ovarian tumors), calculated as TP/(TP+FN). Specificity (true negative rate) represented the model&#x2019;s ability to accurately identify non-malignant cases, computed as TN/(TN+FP). AUC, derived from the ROC curve, served as a composite metric for diagnostic discriminative performance. For studies reporting multiple contingency tables across datasets (e.g., two external validation cohorts), all independent datasets were extracted. When multiple AI models or algorithms were presented, in order to avoid patient overlap, only the optimal-performing model (highest AUC) from internal/external validation sets or sonographer comparisons was included.</p>
</sec>
<sec id="s2_6">
<title>Statistical analysis</title>
<p>Statistical analyses were conducted using a bivariate random-effects model to pool sensitivity and specificity estimates for AI performance across internal validation, external validation, and sonographer datasets (<xref ref-type="bibr" rid="B14">14</xref>). Forest plots visually summarized pooled sensitivities and specificities, while SROC curves illustrated combined estimates with 95% confidence intervals (CIs) and prediction intervals. Heterogeneity between studies was quantified using the I<sup>2</sup> statistic, with a threshold of 50% indicating possible significant heterogeneity (<xref ref-type="bibr" rid="B15">15</xref>). For internal validation datasets (&gt;10 studies with I<sup>2</sup> &gt; 50%), subgroup analysis and meta-regression were conducted, evaluating covariates such as study design, analysis type, AI method, ultrasound type, AI model, and optimal AI algorithms. Clinical utility was assessed using Fagan&#x2019;s nomogram, while publication bias was evaluated via Deeks&#x2019; funnel plot asymmetry test (<xref ref-type="bibr" rid="B16">16</xref>). The 95% CIs of the AUCs of AI and sonographer were compared. Nonoverlapping 95% CIs between two subgroups indicated a statistically significant difference. All analyses were conducted in Stata 15.1 (Midas package), with statistical significance defined as <italic>P</italic> &lt; 0.05. Study quality and risk of bias were conducted using RevMan 5.4 (Cochrane Collaboration).</p>
</sec>
</sec>
<sec id="s3" sec-type="results">
<title>Results</title>
<sec id="s3_1">
<title>Study selection</title>
<p>A systematic search of four databases identified 302 studies, with 108 duplicates removed, yielding 194 unique records for initial screening. A total of 165 articles were excluded during title/abstract screening due to non-relevant publication types (e.g., case reports, conference abstracts, reviews, meta-analyses, or commentaries) or clearly irrelevant titles/abstracts. Full-text eligibility assessment of the remaining 29 studies excluded 11 additional articles: four lacked extractable diagnostic performance metrics (TP, FP, FN, TN), four utilized non-ultrasound-based AI models, and three employed non-pathological reference standards. Eighteen studies met all criteria for final inclusion (<xref ref-type="bibr" rid="B9">9</xref>, <xref ref-type="bibr" rid="B17">17</xref>&#x2013;<xref ref-type="bibr" rid="B32">32</xref>). The selection process adhered to PRISMA guidelines, as detailed in <xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1</bold></xref>.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) flow diagram depicting the study selection process.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-15-1626286-g001.tif">
<alt-text content-type="machine-generated">Flowchart illustrating the study identification via databases for meta-analysis. Initially, 300 records were identified from PubMed, Embase, Web of Science, and Cochrane, with 2 additional records from other sources. After removing duplicates, 194 records remained. Of these, 165 were excluded due to irrelevance or being case reports, reviews, or animal experiments. 29 full-text articles were assessed, with 11 further excluded due to unavailable data, non-ultrasound-based AI, or non-pathology. Finally, 18 studies were included in the meta-analysis.</alt-text>
</graphic></fig>
</sec>
<sec id="s3_2">
<title>Study description and quality assessment</title>
<p>A total of 18 eligible studies were identified, including 17 studies (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B9">9</xref>, <xref ref-type="bibr" rid="B17">17</xref>&#x2013;<xref ref-type="bibr" rid="B21">21</xref>, <xref ref-type="bibr" rid="B23">23</xref>&#x2013;<xref ref-type="bibr" rid="B32">32</xref>) in the internal validation set with 22,697 total patients/images/lesions (range: 1-7,995), and three studies (<xref ref-type="bibr" rid="B9">9</xref>, <xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B22">22</xref>) in external validation involving 2,297 patients (range: 2-662). The studies were published between 1999 and 2024. Among them, 13 studies included in the meta-analysis were retrospective (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B9">9</xref>, <xref ref-type="bibr" rid="B17">17</xref>&#x2013;<xref ref-type="bibr" rid="B19">19</xref>, <xref ref-type="bibr" rid="B21">21</xref>, <xref ref-type="bibr" rid="B23">23</xref>, <xref ref-type="bibr" rid="B26">26</xref>&#x2013;<xref ref-type="bibr" rid="B28">28</xref>, <xref ref-type="bibr" rid="B30">30</xref>&#x2013;<xref ref-type="bibr" rid="B32">32</xref>), while five were prospective (<xref ref-type="bibr" rid="B20">20</xref>, <xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B24">24</xref>, <xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B29">29</xref>). 
Transvaginal ultrasound alone was used in nine studies (<xref ref-type="bibr" rid="B18">18</xref>&#x2013;<xref ref-type="bibr" rid="B20">20</xref>, <xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B24">24</xref>, <xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B28">28</xref>, <xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B32">32</xref>), combined transvaginal and transabdominal ultrasound in four (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B21">21</xref>, <xref ref-type="bibr" rid="B26">26</xref>), transabdominal ultrasound alone in one (<xref ref-type="bibr" rid="B31">31</xref>), transvaginal combined with transrectal ultrasound in one (<xref ref-type="bibr" rid="B9">9</xref>), and unspecified methods in three studies (<xref ref-type="bibr" rid="B23">23</xref>, <xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B29">29</xref>). Radiomic &amp; Clinical AI models were employed in eight studies (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B20">20</xref>, <xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B24">24</xref>&#x2013;<xref ref-type="bibr" rid="B26">26</xref>, <xref ref-type="bibr" rid="B29">29</xref>, <xref ref-type="bibr" rid="B30">30</xref>), while radiomic models alone were used in ten studies (<xref ref-type="bibr" rid="B9">9</xref>, <xref ref-type="bibr" rid="B17">17</xref>&#x2013;<xref ref-type="bibr" rid="B19">19</xref>, <xref ref-type="bibr" rid="B21">21</xref>, <xref ref-type="bibr" rid="B23">23</xref>, <xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B28">28</xref>, <xref ref-type="bibr" rid="B31">31</xref>, <xref ref-type="bibr" rid="B32">32</xref>). 
Deep learning methods were utilized in eight studies (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B9">9</xref>, <xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B19">19</xref>, <xref ref-type="bibr" rid="B21">21</xref>, <xref ref-type="bibr" rid="B23">23</xref>, <xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B31">31</xref>), with the remaining ten employing machine learning (<xref ref-type="bibr" rid="B18">18</xref>, <xref ref-type="bibr" rid="B20">20</xref>, <xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B24">24</xref>&#x2013;<xref ref-type="bibr" rid="B26">26</xref>, <xref ref-type="bibr" rid="B28">28</xref>&#x2013;<xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B32">32</xref>). Patient-based analysis was conducted in nine studies (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B9">9</xref>, <xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B19">19</xref>&#x2013;<xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B26">26</xref>), lesion-based in four (<xref ref-type="bibr" rid="B18">18</xref>, <xref ref-type="bibr" rid="B24">24</xref>, <xref ref-type="bibr" rid="B29">29</xref>, <xref ref-type="bibr" rid="B30">30</xref>), and image-based in five (<xref ref-type="bibr" rid="B23">23</xref>, <xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B28">28</xref>, <xref ref-type="bibr" rid="B31">31</xref>, <xref ref-type="bibr" rid="B32">32</xref>). Study characteristics along with patient demographics and technical details are summarized in <xref ref-type="table" rid="T1"><bold>Tables&#xa0;1</bold></xref>, <xref ref-type="table" rid="T2"><bold>2</bold></xref>, and <xref ref-type="supplementary-material" rid="SM1"><bold>Supplementary Table&#xa0;2</bold></xref>.</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Study and patient characteristics of the included studies.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" rowspan="2" align="center">Author</th>
<th valign="top" rowspan="2" align="center">Year</th>
<th valign="top" rowspan="2" align="center">Country</th>
<th valign="top" rowspan="2" align="center">Study design</th>
<th valign="top" rowspan="2" align="center">Type of ultrasound</th>
<th valign="top" rowspan="2" align="center">Type of histopathology</th>
<th valign="top" rowspan="2" align="center">Reference standard</th>
<th valign="top" rowspan="2" align="center">Analysis</th>
<th valign="top" colspan="3" align="center">Patients/lesions/images per set</th>
<th valign="top" rowspan="2" align="center">No. of malignant ovarian cancer patients/lesions/images</th>
</tr>
<tr>
<th valign="top" align="center">Training</th>
<th valign="top" align="center">Internal validation</th>
<th valign="top" align="center">External validation</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="center">Li et&#xa0;al. (<xref ref-type="bibr" rid="B17">17</xref>)</td>
<td valign="top" align="center">2022</td>
<td valign="top" align="center">China</td>
<td valign="top" align="center">Retro</td>
<td valign="top" align="center">Transvaginal &amp; Transabdominal</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">PB</td>
<td valign="top" align="center">1099</td>
<td valign="top" align="center">460</td>
<td valign="top" align="center">462</td>
<td valign="top" align="center">Training:217<break/>Internal validation:71<break/>External validation:84</td>
</tr>
<tr>
<td valign="top" align="center">Alc&#xe1;zar et&#xa0;al. (<xref ref-type="bibr" rid="B18">18</xref>)</td>
<td valign="top" align="center">2001</td>
<td valign="top" align="center">Spain</td>
<td valign="top" align="center">Retro</td>
<td valign="top" align="center">Transvaginal</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">LB</td>
<td valign="top" align="center">268</td>
<td valign="top" align="center">135</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Training:65<break/>Internal validation:32</td>
</tr>
<tr>
<td valign="top" align="center">Szpurek et&#xa0;al. (<xref ref-type="bibr" rid="B19">19</xref>)</td>
<td valign="top" align="center">2005</td>
<td valign="top" align="center">Poland</td>
<td valign="top" align="center">Retro</td>
<td valign="top" align="center">Transvaginal</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">PB</td>
<td valign="top" align="center">500</td>
<td valign="top" align="center">66</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Training:185<break/>Internal validation:22</td>
</tr>
<tr>
<td valign="top" align="center">Timmerman et&#xa0;al. (<xref ref-type="bibr" rid="B20">20</xref>)</td>
<td valign="top" align="center">1999</td>
<td valign="top" align="center">Belgium</td>
<td valign="top" align="center">Pro</td>
<td valign="top" align="center">Transvaginal</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">PB</td>
<td valign="top" align="center">116</td>
<td valign="top" align="center">57</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Training:33<break/>Internal validation:16</td>
</tr>
<tr>
<td valign="top" align="center">Chen et&#xa0;al. (<xref ref-type="bibr" rid="B21">21</xref>)</td>
<td valign="top" align="center">2022</td>
<td valign="top" align="center">China</td>
<td valign="top" align="center">Retro</td>
<td valign="top" align="center">Transvaginal &amp; Transabdominal</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">PB</td>
<td valign="top" align="center">296</td>
<td valign="top" align="center">85</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Training:83<break/>Internal validation:24</td>
</tr>
<tr>
<td valign="top" align="center">Holsbeke et&#xa0;al. (<xref ref-type="bibr" rid="B22">22</xref>)</td>
<td valign="top" align="center">2007</td>
<td valign="top" align="center">multiple countries</td>
<td valign="top" align="center">Pro</td>
<td valign="top" align="center">Transvaginal</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">PB</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">809</td>
<td valign="top" align="center">External validation:242</td>
</tr>
<tr>
<td valign="top" align="center">Deeparani et&#xa0;al. (<xref ref-type="bibr" rid="B23">23</xref>)</td>
<td valign="top" align="center">2023</td>
<td valign="top" align="center">Sweden</td>
<td valign="top" align="center">Retro</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Epithelial ovarian cancer, non-epithelial ovarian cancer</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">IB</td>
<td valign="top" align="center">23965</td>
<td valign="top" align="center">15977</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Training:11680<break/>Internal validation:7995</td>
</tr>
<tr>
<td valign="top" align="center">Vaes et&#xa0;al.</td>
<td valign="top" align="center">2010</td>
<td valign="top" align="center">Belgium</td>
<td valign="top" align="center">Pro</td>
<td valign="top" align="center">Transvaginal</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">LB</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">151</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Internal validation:83</td>
</tr>
<tr>
<td valign="top" align="center">Wang et&#xa0;al. (<xref ref-type="bibr" rid="B7">7</xref>)</td>
<td valign="top" align="center">2024</td>
<td valign="top" align="center">China</td>
<td valign="top" align="center">Retro</td>
<td valign="top" align="center">Transvaginal &amp; Transabdominal</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">PB</td>
<td valign="top" align="center">675</td>
<td valign="top" align="center">210</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Training:228<break/>Internal validation:70</td>
</tr>
<tr>
<td valign="top" align="center">Holsbeke et&#xa0;al. (<xref ref-type="bibr" rid="B25">25</xref>)</td>
<td valign="top" align="center">2009</td>
<td valign="top" align="center">multiple countries</td>
<td valign="top" align="center">Pro</td>
<td valign="top" align="center">Transvaginal</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">PB</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">124</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Internal validation:26</td>
</tr>
<tr>
<td valign="top" align="center">Moro et&#xa0;al. (<xref ref-type="bibr" rid="B26">26</xref>)</td>
<td valign="top" align="center">2024</td>
<td valign="top" align="center">Italy</td>
<td valign="top" align="center">Retro</td>
<td valign="top" align="center">Transvaginal &amp; Transabdominal</td>
<td valign="top" align="center">High grade serous</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">PB</td>
<td valign="top" align="center">228</td>
<td valign="top" align="center">98</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Training:170<break/>Internal validation:73</td>
</tr>
<tr>
<td valign="top" align="center">Jung et&#xa0;al. (<xref ref-type="bibr" rid="B27">27</xref>)</td>
<td valign="top" align="center">2022</td>
<td valign="top" align="center">Korea</td>
<td valign="top" align="center">Retro</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">IB</td>
<td valign="top" align="center">1613</td>
<td valign="top" align="center">1613</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Training:539<break/>Internal validation:539</td>
</tr>
<tr>
<td valign="top" align="center">Stefan et&#xa0;al. (<xref ref-type="bibr" rid="B28">28</xref>)</td>
<td valign="top" align="center">2021</td>
<td valign="top" align="center">Romania</td>
<td valign="top" align="center">Retro</td>
<td valign="top" align="center">Transvaginal</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">IB</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">123</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Internal validation:35</td>
</tr>
<tr>
<td valign="top" align="center">Amidi et&#xa0;al. (<xref ref-type="bibr" rid="B29">29</xref>)</td>
<td valign="top" align="center">2019</td>
<td valign="top" align="center">America</td>
<td valign="top" align="center">Pro</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">High grade serous carcinoma, Endometrioid carcinoma</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">LB</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">13</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Internal validation:7</td>
</tr>
<tr>
<td valign="top" align="center">Lin et&#xa0;al.</td>
<td valign="top" align="center">2024</td>
<td valign="top" align="center">America</td>
<td valign="top" align="center">Retro</td>
<td valign="top" align="center">Transvaginal</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">LB</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">93</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Internal validation:21</td>
</tr>
<tr>
<td valign="top" align="center">Wang et&#xa0;al. (<xref ref-type="bibr" rid="B31">31</xref>)</td>
<td valign="top" align="center">2021</td>
<td valign="top" align="center">China</td>
<td valign="top" align="center">Retro</td>
<td valign="top" align="center">Transabdominal</td>
<td valign="top" align="center">Serous ovarian carcinoma</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">IB</td>
<td valign="top" align="center">279</td>
<td valign="top" align="center">279</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Training:171<break/>Internal validation:171</td>
</tr>
<tr>
<td valign="top" align="center">Acharya et&#xa0;al.</td>
<td valign="top" align="center">2012</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Retro</td>
<td valign="top" align="center">Transvaginal</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">IB</td>
<td valign="top" align="center">2600</td>
<td valign="top" align="center">2600</td>
<td valign="top" align="center">NA</td>
<td valign="top" align="center">Training:1300<break/>Internal validation:1300</td>
</tr>
<tr>
<td valign="top" align="center">Gao et&#xa0;al. (<xref ref-type="bibr" rid="B9">9</xref>)</td>
<td valign="top" align="center">2022</td>
<td valign="top" align="center">China</td>
<td valign="top" align="center">Retro</td>
<td valign="top" align="center">Transvaginal &amp; Transrectal</td>
<td valign="top" align="center">Serous, Mucinous, Endometrioid, Clear cell</td>
<td valign="top" align="center">Pathology</td>
<td valign="top" align="center">PB</td>
<td valign="top" align="center">105532</td>
<td valign="top" align="center">868</td>
<td valign="top" align="center">1224</td>
<td valign="top" align="center">Training:3755<break/>Internal validation:266<break/>External validation:233</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>Retro, retrospective; Pro, prospective; PB, patient-based; IB, image-based; LB, lesion-based; NA, not available.</p></fn>
</table-wrap-foot>
</table-wrap>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Technical aspects of included studies.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" rowspan="2" align="center">Author</th>
<th valign="middle" rowspan="2" align="center">Year</th>
<th valign="middle" rowspan="2" align="center">AI method</th>
<th valign="middle" rowspan="2" align="center">AI model</th>
<th valign="middle" rowspan="2" align="center">Optimal AI algorithms <sup>a</sup></th>
<th valign="middle" rowspan="2" align="center">Data splitting method</th>
<th valign="middle" colspan="4" align="center">Internal validation sets</th>
<th valign="middle" colspan="4" align="center">External validation sets</th>
<th valign="middle" colspan="4" align="center">Sonographers</th>
</tr>
<tr>
<th valign="middle" align="center">TP</th>
<th valign="middle" align="center">FP</th>
<th valign="middle" align="center">FN</th>
<th valign="middle" align="center">TN</th>
<th valign="middle" align="center">TP</th>
<th valign="middle" align="center">FP</th>
<th valign="middle" align="center">FN</th>
<th valign="middle" align="center">TN</th>
<th valign="middle" align="center">TP</th>
<th valign="middle" align="center">FP</th>
<th valign="middle" align="center">FN</th>
<th valign="middle" align="center">TN</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">Li et&#xa0;al. (<xref ref-type="bibr" rid="B17">17</xref>) (set 1)</td>
<td valign="middle" align="center">2022</td>
<td valign="middle" align="center">Deep learning</td>
<td valign="middle" align="center">Radiomic</td>
<td valign="middle" align="center">CNN</td>
<td valign="middle" align="center">Time-Series Split</td>
<td valign="middle" align="center">62</td>
<td valign="middle" align="center">14</td>
<td valign="middle" align="center">9</td>
<td valign="middle" align="center">120</td>
<td valign="middle" align="center">46</td>
<td valign="middle" align="center">9</td>
<td valign="middle" align="center">5</td>
<td valign="middle" align="center">42</td>
<td valign="middle" align="center">41</td>
<td valign="middle" align="center">9</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">42</td>
</tr>
<tr>
<td valign="middle" align="center">Li et&#xa0;al. (<xref ref-type="bibr" rid="B17">17</xref>) (set 2)</td>
<td valign="middle" align="center">2022</td>
<td valign="middle" align="center">Deep learning</td>
<td valign="middle" align="center">Radiomic</td>
<td valign="middle" align="center">CNN</td>
<td valign="middle" align="center">Time-Series Split</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="left">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="left">34</td>
<td valign="middle" align="center">22</td>
<td valign="middle" align="center">2</td>
<td valign="middle" align="center">104</td>
<td valign="middle" align="center">20</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">13</td>
<td valign="middle" align="center">116</td>
</tr>
<tr>
<td valign="middle" align="center">Alc&#xe1;zar et&#xa0;al. (<xref ref-type="bibr" rid="B18">18</xref>)</td>
<td valign="middle" align="center">2001</td>
<td valign="middle" align="center">Machine learning</td>
<td valign="middle" align="center">Radiomic</td>
<td valign="middle" align="center">LR</td>
<td valign="middle" align="center">Random split</td>
<td valign="middle" align="center">31</td>
<td valign="middle" align="left">6</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">97</td>
<td valign="middle" align="left">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
</tr>
<tr>
<td valign="middle" align="center">Szpurek et&#xa0;al. (<xref ref-type="bibr" rid="B19">19</xref>)</td>
<td valign="middle" align="center">2005</td>
<td valign="middle" align="center">Deep learning</td>
<td valign="middle" align="center">Radiomic</td>
<td valign="middle" align="center">MLP</td>
<td valign="middle" align="center">Random split</td>
<td valign="middle" align="center">18</td>
<td valign="middle" align="center">3</td>
<td valign="middle" align="center">4</td>
<td valign="middle" align="center">41</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
</tr>
<tr>
<td valign="middle" align="center">Timmerman et&#xa0;al. (<xref ref-type="bibr" rid="B20">20</xref>)</td>
<td valign="middle" align="center">1999</td>
<td valign="middle" align="center">Machine learning</td>
<td valign="middle" align="center">Radiomic &amp;Clinical Model</td>
<td valign="middle" align="center">LR</td>
<td valign="middle" align="center">Random split</td>
<td valign="middle" align="center">15</td>
<td valign="middle" align="left">2</td>
<td valign="middle" align="left">1</td>
<td valign="middle" align="left">39</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
</tr>
<tr>
<td valign="middle" align="center">Chen et&#xa0;al. (<xref ref-type="bibr" rid="B21">21</xref>)</td>
<td valign="middle" align="center">2022</td>
<td valign="middle" align="center">Deep learning</td>
<td valign="middle" align="center">Radiomic</td>
<td valign="middle" align="center">CNN</td>
<td valign="middle" align="center">Stratified sampling</td>
<td valign="middle" align="center">22</td>
<td valign="middle" align="center">9</td>
<td valign="middle" align="center">2</td>
<td valign="middle" align="center">52</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">23</td>
<td valign="middle" align="center">8</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">53</td>
</tr>
<tr>
<td valign="middle" align="center">Holsbeke et&#xa0;al. (<xref ref-type="bibr" rid="B22">22</xref>)</td>
<td valign="middle" align="center">2007</td>
<td valign="middle" align="center">Machine learning</td>
<td valign="middle" align="center">Radiomic &amp;Clinical Model</td>
<td valign="middle" align="center">RVM</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">218</td>
<td valign="middle" align="center">146</td>
<td valign="middle" align="center">24</td>
<td valign="middle" align="center">421</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
</tr>
<tr>
<td valign="middle" align="center">Deeparani et&#xa0;al. (<xref ref-type="bibr" rid="B23">23</xref>)</td>
<td valign="middle" align="center">2023</td>
<td valign="middle" align="center">Deep learning</td>
<td valign="middle" align="center">Radiomic</td>
<td valign="middle" align="center">CNN</td>
<td valign="middle" align="center">Random split</td>
<td valign="middle" align="center">7995</td>
<td valign="middle" align="center">3</td>
<td valign="middle" align="center">2</td>
<td valign="middle" align="center">7977</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
</tr>
<tr>
<td valign="middle" align="center">Vaes et&#xa0;al.</td>
<td valign="middle" align="center">2010</td>
<td valign="middle" align="center">Machine learning</td>
<td valign="middle" align="center">Radiomic &amp;Clinical</td>
<td valign="middle" align="center">LR</td>
<td valign="middle" align="center">Random split</td>
<td valign="middle" align="center">69</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">14</td>
<td valign="middle" align="center">67</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
</tr>
<tr>
<td valign="middle" align="center">Wang et&#xa0;al. (<xref ref-type="bibr" rid="B7">7</xref>)</td>
<td valign="middle" align="center">2024</td>
<td valign="middle" align="center">Deep learning</td>
<td valign="middle" align="center">Radiomic &amp;Clinical</td>
<td valign="middle" align="center">CNN</td>
<td valign="middle" align="center">Random split</td>
<td valign="middle" align="center">66</td>
<td valign="middle" align="center">7</td>
<td valign="middle" align="center">4</td>
<td valign="middle" align="center">133</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
</tr>
<tr>
<td valign="middle" align="center">Holsbeke et&#xa0;al. (<xref ref-type="bibr" rid="B25">25</xref>)</td>
<td valign="middle" align="center">2009</td>
<td valign="middle" align="center">Machine learning</td>
<td valign="middle" align="center">Radiomic &amp;Clinical</td>
<td valign="middle" align="center">LR</td>
<td valign="middle" align="center">Random split</td>
<td valign="middle" align="center">25</td>
<td valign="middle" align="center">25</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">73</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
</tr>
<tr>
<td valign="middle" align="center">Moro et&#xa0;al. (<xref ref-type="bibr" rid="B26">26</xref>)</td>
<td valign="middle" align="center">2024</td>
<td valign="middle" align="center">Machine learning</td>
<td valign="middle" align="center">Radiomic &amp;Clinical</td>
<td valign="middle" align="center">LR</td>
<td valign="middle" align="center">Random split</td>
<td valign="middle" align="center">72</td>
<td valign="middle" align="center">9</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">16</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">72</td>
<td valign="middle" align="center">7</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">18</td>
</tr>
<tr>
<td valign="middle" align="center">Jung et&#xa0;al. (<xref ref-type="bibr" rid="B27">27</xref>)</td>
<td valign="middle" align="center">2022</td>
<td valign="middle" align="center">Deep learning</td>
<td valign="middle" align="center">Radiomic</td>
<td valign="middle" align="center">CNN</td>
<td valign="middle" align="center">5-fold cross validation</td>
<td valign="middle" align="center">458</td>
<td valign="middle" align="center">101</td>
<td valign="middle" align="center">81</td>
<td valign="middle" align="center">973</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
</tr>
<tr>
<td valign="middle" align="center">Stefan et&#xa0;al. (<xref ref-type="bibr" rid="B28">28</xref>)</td>
<td valign="middle" align="center">2021</td>
<td valign="middle" align="center">Machine learning</td>
<td valign="middle" align="center">Radiomic</td>
<td valign="middle" align="center">Multiple Regression</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">32</td>
<td valign="middle" align="center">6</td>
<td valign="middle" align="center">3</td>
<td valign="middle" align="center">82</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
</tr>
<tr>
<td valign="middle" align="center">Amidi et&#xa0;al. (<xref ref-type="bibr" rid="B29">29</xref>)</td>
<td valign="middle" align="center">2019</td>
<td valign="middle" align="center">Machine learning</td>
<td valign="middle" align="center">Radiomic &amp;Clinical</td>
<td valign="middle" align="center">SVM</td>
<td valign="middle" align="center">Random cross validation</td>
<td valign="middle" align="center">5</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">2</td>
<td valign="middle" align="center">5</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
</tr>
<tr>
<td valign="middle" align="center">Lin et&#xa0;al.</td>
<td valign="middle" align="center">2024</td>
<td valign="middle" align="center">Machine learning</td>
<td valign="middle" align="center">Radiomic &amp;Clinical</td>
<td valign="middle" align="center">KNN</td>
<td valign="middle" align="center">Random split</td>
<td valign="middle" align="center">20</td>
<td valign="middle" align="center">4</td>
<td valign="middle" align="center">1</td>
<td valign="middle" align="center">68</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
</tr>
<tr>
<td valign="middle" align="center">Wang et&#xa0;al. (<xref ref-type="bibr" rid="B31">31</xref>)</td>
<td valign="middle" align="center">2021</td>
<td valign="middle" align="center">Deep learning</td>
<td valign="middle" align="center">Radiomic</td>
<td valign="middle" align="center">DCNN</td>
<td valign="middle" align="center">3-fold cross validation</td>
<td valign="middle" align="center">156</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">15</td>
<td valign="middle" align="center">98</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">128</td>
<td valign="middle" align="center">19</td>
<td valign="middle" align="center">43</td>
<td valign="middle" align="center">89</td>
</tr>
<tr>
<td valign="middle" align="center">Acharya et&#xa0;al.</td>
<td valign="middle" align="center">2012</td>
<td valign="middle" align="center">Machine learning</td>
<td valign="middle" align="center">Radiomic</td>
<td valign="middle" align="center">PNN</td>
<td valign="middle" align="center">10-fold cross validation</td>
<td valign="middle" align="center">1290</td>
<td valign="middle" align="center">4</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">1296</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
</tr>
<tr>
<td valign="middle" align="center">Gao et&#xa0;al. (<xref ref-type="bibr" rid="B9">9</xref>) (set 1)</td>
<td valign="middle" align="center">2022</td>
<td valign="middle" align="center">Deep learning</td>
<td valign="middle" align="center">Radiomic</td>
<td valign="middle" align="center">DCNN</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">210</td>
<td valign="middle" align="center">41</td>
<td valign="middle" align="center">56</td>
<td valign="middle" align="center">561</td>
<td valign="middle" align="center">27</td>
<td valign="middle" align="center">4</td>
<td valign="middle" align="center">40</td>
<td valign="middle" align="center">264</td>
<td valign="middle" align="center">44</td>
<td valign="middle" align="center">46</td>
<td valign="middle" align="center">36</td>
<td valign="middle" align="center">254</td>
</tr>
<tr>
<td valign="middle" align="center">Gao et&#xa0;al. (<xref ref-type="bibr" rid="B9">9</xref>) (set 2)</td>
<td valign="middle" align="center">2022</td>
<td valign="middle" align="center">Deep learning</td>
<td valign="middle" align="center">Radiomic</td>
<td valign="middle" align="center">DCNN</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">96</td>
<td valign="middle" align="center">61</td>
<td valign="middle" align="center">70</td>
<td valign="middle" align="center">662</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
<td valign="middle" align="center">NA</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>TP, true positive; TN, true negative; FP, false positive; FN, false negative; NA, not available; <sup>a</sup> Optimal means the artificial intelligence algorithm with the highest AUC value; DCNN, Deep Convolutional Neural Networks; CNN, Convolutional Neural Networks; PNN, Probability Neural Network; KNN, k-Nearest Neighbors; MLP, Multilayer Perceptron; RVM, Relevance Vector Machine; SVM, Support Vector Machine; LR, Logistic Regression.</p></fn>
</table-wrap-foot>
</table-wrap>
<p>The risk of bias assessment using the revised QUADAS-2 tool is shown in <xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2</bold></xref>. Regarding the index test, one study was rated as &#x201c;high risk&#x201d; because it only reported the model name without critical training procedures details (<xref ref-type="bibr" rid="B18">18</xref>). In the analysis domain, three studies were deemed &#x201c;high risk&#x201d; due to the exclusion of participants from specific subgroups or partial cohorts (<xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B29">29</xref>). The methodological quality of included studies was deemed acceptable based on overall quality assessment.</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Risk of bias and applicability concerns of the included studies using the revised Quality Assessment of Diagnostic Accuracy Studies-2 (QUADAS-2) tool.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-15-1626286-g002.tif">
<alt-text content-type="machine-generated">Grid chart displaying the risk of bias and applicability concerns for various studies. Columns represent &#x201c;Patient Selection,&#x201d; &#x201c;Index Test,&#x201d; &#x201c;Reference Standard,&#x201d; and &#x201c;Analysis.&#x201d; Each study is assessed with symbols: green plus for low risk, yellow question mark for unclear risk, and red minus for high risk. Most assessments show low risk, with occasional unclear or high risks noted.</alt-text>
</graphic></fig>
</sec>
<sec id="s3_3">
<title>Diagnostic performance of internal validation sets for AI and sonographers in predicting ovarian malignancy: non-head-to-head comparison</title>
<p>For the internal validation sets, the sensitivity of AI based on ultrasound in detecting ovarian malignancy was 0.95 (95% CI: 0.88-0.98), and the specificity was 0.95 (95% CI: 0.89-0.98) (<xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3</bold></xref>). The AUC was 0.98 (95% CI: 0.97-0.99) (<xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4a</bold></xref>). Using a pretest probability of 20%, the Fagan nomogram showed a post-test probability of 82% after a positive test result and 1% after a negative test result (<xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5a</bold></xref>).</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>Forest plots displaying the sensitivity and specificity of the internal validation sets. Squares denoted the sensitivity and specificity in each study, while horizontal bars indicated the 95% confidence interval.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-15-1626286-g003.tif">
<alt-text content-type="machine-generated">Forest plot showing sensitivity and specificity for various studies. The left panel depicts sensitivity with blue squares and lines indicating confidence intervals, while the right panel shows specificity with yellow squares and lines. Each study is listed alongside its respective values. Combined results are at the bottom, with sensitivity at 0.95 [0.88 - 0.98], and specificity at 0.95 [0.89 - 0.98]. Heterogeneity statistics are provided at the bottom of each panel.</alt-text>
</graphic></fig>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Summary receiver operating characteristic (SROC) curves of ultrasound-based artificial intelligence on the internal validation set <bold>(a)</bold> and sonographers <bold>(b)</bold> for diagnosing ovarian cancer.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-15-1626286-g004.tif">
<alt-text content-type="machine-generated">Two panels, a and b, display summary receiver operating characteristic (SROC) curves with sensitivity versus specificity. Panel a has light blue circles for observed data, a red diamond for the summary operating point (SENS = 0.95, SPEC = 0.95), and shows a solid line for the SROC curve with AUC of 0.98. Panel b features light green circles, a red diamond (SENS = 0.83, SPEC = 0.84), and indicates an AUC of 0.87. Both panels include dotted and dashed lines for 95% prediction and confidence contours, respectively. Legends explain the symbols and lines.</alt-text>
</graphic></fig>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>Fagan&#x2019;s nomogram for artificial intelligence on the internal validation set <bold>(a)</bold> and sonographers <bold>(b)</bold> for diagnosing ovarian cancer.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-15-1626286-g005.tif">
<alt-text content-type="machine-generated">Panel a shows a Fagan's nomogram with a pre-test probability of 20% and a likelihood ratio positive of 18, resulting in an 82% post-test probability. The likelihood ratio negative is 0.06, leading to a 1% post-test probability. Panel b displays a pre-test probability of 20% with a likelihood ratio positive of 5, resulting in a 57% post-test probability. The likelihood ratio negative is 0.20, resulting in a 5% post-test probability. Both panels feature lines connecting these probabilities on logarithmic scales.</alt-text>
</graphic></fig>
<p>For sonographers, the sensitivity in detecting ovarian malignancy was 0.83 (95% CI: 0.62-0.94), and the specificity was 0.84 (95% CI: 0.79-0.88) (<xref ref-type="fig" rid="f6"><bold>Figure&#xa0;6</bold></xref>). The AUC was 0.87 (95% CI: 0.84-0.90) (<xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4b</bold></xref>). Using a pretest probability of 20%, the Fagan nomogram showed a post-test probability of 57% after a positive test result and 5% after a negative test result (<xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5b</bold></xref>). The AUC value of the AI based on ultrasound was significantly higher than that of the sonographers, with no overlapping 95% CI.</p>
<fig id="f6" position="float">
<label>Figure&#xa0;6</label>
<caption>
<p>Forest plots displaying the sensitivity and specificity of the sonographers. Squares denoted the sensitivity and specificity in each study, while horizontal bars indicated the 95% confidence interval.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-15-1626286-g006.tif">
<alt-text content-type="machine-generated">Forest plot comparing sensitivity and specificity of various studies. Sensitivity ranges from 0.55 to 0.99 with a combined sensitivity of 0.83, while specificity ranges from 0.72 to 0.92 with a combined specificity of 0.84. Confidence intervals are shown for each study. Statistical measures include Q and I&#xb2; values with significance noted.</alt-text>
</graphic></fig>
</sec>
<sec id="s3_4">
<title>Diagnostic performance of AI and sonographers in predicting ovarian malignancy: head-to-head comparison</title>
<p>Six studies provided data for head-to-head comparison. The sensitivity of AI based on ultrasound in detecting ovarian malignancy was 0.91 (95% CI: 0.74-0.97), and the specificity was 0.89 (95% CI: 0.76-0.95), and the AUC was 0.95 (95% CI: 0.93-0.97) (<xref ref-type="table" rid="T3"><bold>Table&#xa0;3</bold></xref>). For sonographers, the sensitivity in detecting ovarian malignancy was 0.83 (95% CI: 0.62-0.94), and the specificity was 0.84 (95% CI: 0.79-0.88). The AUC was 0.87 (95% CI: 0.84-0.90) (<xref ref-type="table" rid="T3"><bold>Table&#xa0;3</bold></xref>). The AUC value of the AI based on ultrasound was significantly higher than that of the sonographers, with no overlapping 95% CI.</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Sensitivity analysis of ultrasound-based artificial intelligence performance.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="center">Sensitivity analysis</th>
<th valign="top" align="center">Datasets, n</th>
<th valign="top" align="center">Sensitivity (95%CI)</th>
<th valign="top" align="center">Specificity (95%CI)</th>
<th valign="top" align="center">AUC (95%CI)</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="center">Only patient-based analysis studies included</td>
<td valign="top" align="center">8</td>
<td valign="top" align="center">0.92(0.84-0.96)</td>
<td valign="top" align="center">0.88(0.82-0.93)</td>
<td valign="top" align="center">0.96(0.94-0.97)</td>
</tr>
<tr>
<td valign="top" align="center">Only image-based analysis studies included</td>
<td valign="top" align="center">5</td>
<td valign="top" align="center">0.98(0.85-1.00)</td>
<td valign="top" align="center">0.99(0.90-1.00)</td>
<td valign="top" align="center">1.00(0.99-1.00)</td>
</tr>
<tr>
<td valign="top" align="center">Excluding studies that included borderline ovarian tumors</td>
<td valign="top" align="center">2</td>
<td valign="top" align="center">0.99(0.98-0.99)</td>
<td valign="top" align="center">0.99(0.99-1.00)</td>
<td valign="top" align="center">NA</td>
</tr>
<tr>
<td valign="top" align="center">AI performance in head-to-head studies</td>
<td valign="top" align="center">6</td>
<td valign="top" align="center">0.91(0.74-0.97)</td>
<td valign="top" align="center">0.89(0.76-0.95)</td>
<td valign="top" align="center">0.95(0.93-0.97)</td>
</tr>
<tr>
<td valign="top" align="center">Sonographers performance in head-to-head studies</td>
<td valign="top" align="center">6</td>
<td valign="top" align="center">0.83(0.62-0.94)</td>
<td valign="top" align="center">0.84(0.79-0.88)</td>
<td valign="top" align="center">0.87(0.84-0.90)</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>AUC, area under the curve.</p></fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s3_5">
<title>Subgroup analysis and meta-regression for internal validation sets for AI in predicting ovarian malignancy</title>
<p>Subgroup analyses demonstrated consistently high sensitivity across all categories (range: 0.89-0.98), with no statistically significant differences observed between study designs (prospective vs. retrospective), AI methodologies (deep learning vs. machine learning), ultrasound approaches (transvaginal+ abdominal vs. transvaginal alone), AI model types (radiomic vs. radiomic+ clinical), or algorithm choices (CNN vs. logistic regression), or type of histopathology (serous vs. non-serous carcinomas) (all <italic>P</italic> &gt; 0.05). Specificity values showed greater variability (range: 0.87-0.96), with a statistically significant difference noted between image-based (0.96, 95% CI: 0.93-1.00) and patient-based analyses (0.89, 95% CI: 0.79-1.00; <italic>P</italic> = 0.01). No other significant specificity differences were detected across subgroups including histological subtypes (all <italic>P</italic> &gt; 0.05), though radiomic models (0.96) trended toward higher specificity than radiomic+ clinical models (0.90; <italic>P</italic> = 0.06).</p>
<p>For the internal validation sets, high heterogeneity was detected in both sensitivity (I&#xb2; = 98.15%) and specificity (I&#xb2; = 97.67%). Meta-regression analysis indicated that the heterogeneity was primarily attributed to the analysis method (Image-based vs. Patient-based, specificity <italic>P</italic> = 0.01) (<xref ref-type="table" rid="T4"><bold>Table&#xa0;4</bold></xref>).</p>
<table-wrap id="T4" position="float">
<label>Table&#xa0;4</label>
<caption>
<p>Subgroup analysis of artificial intelligence performance in internal validation sets for ovarian cancer.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Subgroup</th>
<th valign="middle" align="center">Studies, n</th>
<th valign="middle" align="center">Sensitivity(95%CI)</th>
<th valign="middle" align="center">Meta-regression <italic>P</italic>-value</th>
<th valign="middle" align="center">Specificity(95%CI)</th>
<th valign="middle" align="center">Meta-regression <italic>P</italic>-value</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="center">Study design</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.34</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.51</td>
</tr>
<tr>
<td valign="middle" align="center">Prospective</td>
<td valign="middle" align="center">4</td>
<td valign="middle" align="center">0.89(0.72-1.00)</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.88(0.83-0.93)</td>
<td valign="middle" align="center"/>
</tr>
<tr>
<td valign="middle" align="center">Retrospective</td>
<td valign="middle" align="center">13</td>
<td valign="middle" align="center">0.96(0.92-0.99)</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.96(0.92-0.99)</td>
<td valign="middle" align="center"/>
</tr>
<tr>
<td valign="middle" align="center">Analysis</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.09</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.01</td>
</tr>
<tr>
<td valign="middle" align="center">Image-based</td>
<td valign="middle" align="center">5</td>
<td valign="middle" align="center">0.98(0.95-1.00)</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.96(0.93-1.00)</td>
<td valign="middle" align="center"/>
</tr>
<tr>
<td valign="middle" align="center">Patient-based</td>
<td valign="middle" align="center">8</td>
<td valign="middle" align="center">0.92(0.84-1.00)</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.89(0.79-1.00)</td>
<td valign="middle" align="center"/>
</tr>
<tr>
<td valign="middle" align="center">AI method</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.70</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.27</td>
</tr>
<tr>
<td valign="middle" align="center">Deep learning</td>
<td valign="middle" align="center">8</td>
<td valign="middle" align="center">0.94(0.88-1.00)</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.96(0.91-1.00)</td>
<td valign="middle" align="center"/>
</tr>
<tr>
<td valign="middle" align="center">Machine learning</td>
<td valign="middle" align="center">9</td>
<td valign="middle" align="center">0.95(0.90-1.00)</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.94(0.87-1.00)</td>
<td valign="middle" align="center"/>
</tr>
<tr>
<td valign="middle" align="center">Type of Ultrasound</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.24</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.62</td>
</tr>
<tr>
<td valign="middle" align="center">Transvaginal&amp; Transabdominal</td>
<td valign="middle" align="center">4</td>
<td valign="middle" align="center">0.94(0.89-1.00)</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.87(0.74-1.00)</td>
<td valign="middle" align="center"/>
</tr>
<tr>
<td valign="middle" align="center">Transvaginal</td>
<td valign="middle" align="center">8</td>
<td valign="middle" align="center">0.95(0.91-0.99)</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.96(0.92-0.99)</td>
<td valign="middle" align="center"/>
</tr>
<tr>
<td valign="middle" align="center">AI model</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.39</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.06</td>
</tr>
<tr>
<td valign="middle" align="center">Radiomic</td>
<td valign="middle" align="center">10</td>
<td valign="middle" align="center">0.95(0.91-1.00)</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.96(0.93-1.00)</td>
<td valign="middle" align="center"/>
</tr>
<tr>
<td valign="middle" align="center">Radiomic &amp; Clinical</td>
<td valign="middle" align="center">7</td>
<td valign="middle" align="center">0.93(0.85-1.00)</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.90(0.80-1.00)</td>
<td valign="middle" align="center"/>
</tr>
<tr>
<td valign="middle" align="center">Optimal AI algorithms <sup>a</sup></td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.69</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.32</td>
</tr>
<tr>
<td valign="middle" align="center">CNN</td>
<td valign="middle" align="center">7</td>
<td valign="middle" align="center">0.95(0.89-1.00)</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.96(0.91-1.00)</td>
<td valign="middle" align="center"/>
</tr>
<tr>
<td valign="middle" align="center">LR</td>
<td valign="middle" align="center">5</td>
<td valign="middle" align="center">0.96(0.88-1.00)</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.90(0.77-1.00)</td>
<td valign="middle" align="center"/>
</tr>
<tr>
<td valign="middle" align="center">Type of histopathology</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
<td valign="middle" align="center"/>
</tr>
<tr>
<td valign="middle" align="center">Non-serous carcinoma</td>
<td valign="middle" align="center">12</td>
<td valign="middle" align="center">0.93(0.87-0.99)</td>
<td valign="middle" align="center">0.67</td>
<td valign="middle" align="center">0.94(0.89-0.99)</td>
<td valign="middle" align="center">0.97</td>
</tr>
<tr>
<td valign="middle" align="center">Serous carcinoma</td>
<td valign="middle" align="center">5</td>
<td valign="middle" align="center">0.97(0.92-1.00)</td>
<td valign="middle" align="center"/>
<td valign="middle" align="center">0.96(0.89-1.00)</td>
<td valign="middle" align="center"/>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>CNN, Convolutional Neural Networks; AI, Artificial Intelligence; <sup>a</sup> Optimal means the artificial intelligence algorithm with the highest AUC value; LR, Logistic Regression.</p></fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s3_6">
<title>Diagnostic performance of external validation sets for AI in predicting ovarian malignancy</title>
<p>For the external validation sets, the sensitivity of AI in detecting ovarian malignancy was 0.78 (95% CI:0.56-0.91), and the specificity was 0.88 (95% CI: 0.76-0.95) (<xref ref-type="supplementary-material" rid="SM1"><bold>Supplementary Figure&#xa0;1</bold></xref>). The AUC was 0.91 (95% CI: 0.88-0.93) (<xref ref-type="supplementary-material" rid="SM1"><bold>Supplementary Figure&#xa0;2</bold></xref>). Using a pretest probability of 20%, the Fagan nomogram showed a post-test probability of 63% after a positive test result and 6% after a negative test result (<xref ref-type="supplementary-material" rid="SM1"><bold>Supplementary Figure&#xa0;3</bold></xref>).</p>
</sec>
<sec id="s3_7">
<title>Publication bias</title>
<p>Deeks&#x2019; funnel plot asymmetry test indicated significant publication bias in the internal validation sets for AI and sonographers (<italic>P</italic> &lt; 0.001, <italic>P</italic> = 0.03) (<xref ref-type="fig" rid="f7"><bold>Figures&#xa0;7a, b</bold></xref>). However, no significant publication bias was found in the external validation sets (<italic>P</italic> = 0.13) (<xref ref-type="supplementary-material" rid="SM1"><bold>Supplementary Figure&#xa0;4</bold></xref>).</p>
<fig id="f7" position="float">
<label>Figure&#xa0;7</label>
<caption>
<p>Deeks&#x2019; funnel plot was used to evaluate the publication bias of ultrasound-based artificial intelligence for diagnosing ovarian cancer in the internal validation sets <bold>(a)</bold> and for sonographers <bold>(b)</bold>. <italic>P</italic> &lt; 0.05 was considered significant.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-15-1626286-g007.tif">
<alt-text content-type="machine-generated">Two funnel plots showing Deeks' funnel plot asymmetry test results. The left plot, for internal validation sets, displays points scattered around a line, with a p-value less than 0.001. The right plot, for sonographers, shows fewer points with a p-value of 0.03. Both plots have a diagnostic odds ratio on the x-axis and 1/root(ESS) on the y-axis, with a legend indicating studies and regression lines.</alt-text>
</graphic></fig>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<title>Discussion</title>
<p>Ultrasound-based AI demonstrated excellent diagnostic performance in the initial diagnosis of ovarian cancer, achieving AUC values of 0.98 and 0.91 in the internal and external validation sets, respectively. Additionally, our results revealed that the AUC value for sonographers was 0.87, with no overlap in the 95%CI compared to AI, indicating that the AI model significantly outperformed traditional ultrasound diagnosis. The decline in diagnostic performance in the external validation sets may be attributed to sample heterogeneity and differences in operational standards across hospitals, which could affect the model&#x2019;s generalizability. In contrast, the internal validation was conducted in a relatively uniform environment, ensuring the model&#x2019;s high performance (<xref ref-type="bibr" rid="B1">1</xref>, <xref ref-type="bibr" rid="B2">2</xref>). The advantages of ultrasound-based AI algorithms in complex pattern recognition and data processing enable them to better identify subtle lesion features, thereby providing higher sensitivity and specificity. However, sonographers may be influenced by factors such as experience, fatigue, and subjective judgment when interpreting images, leading to lower diagnostic performance (<xref ref-type="bibr" rid="B3">3</xref>, <xref ref-type="bibr" rid="B9">9</xref>). Therefore, AI technology in the early screening of ovarian cancer may effectively improve diagnostic accuracy and enhance patient prognosis.</p>
<p>Interestingly, according to the results of our subgroup analysis, the sensitivity and specificity of deep learning were 0.94 and 0.96, respectively, while those of machine learning were 0.95 and 0.94. The differences between the two were not statistically significant (both <italic>P</italic> &gt; 0.05). This indicates that deep learning does not significantly outperform machine learning, which is consistent with Xu et&#xa0;al. previous findings (<xref ref-type="bibr" rid="B1">1</xref>). However, due to the high heterogeneity, these results should be interpreted with caution. More studies are needed in the future to confirm whether deep learning algorithms offer any additional value over machine learning algorithms in the accurate diagnosis of ovarian cancer.</p>
<p>We found that regarding different AI models, the sensitivity and specificity of radiomic models did not show statistically significant differences compared to the combined approach of radiomic &amp; clinical models (both <italic>P</italic> &gt; 0.05). This may be attributed to the inherent complexities of integrating clinical data with imaging features (<xref ref-type="bibr" rid="B34">34</xref>). Radiomics, which extracts quantitative features from medical images, may not fully account for the multifactorial nature of tumor behavior and clinical symptoms that influence diagnostic outcomes (<xref ref-type="bibr" rid="B35">35</xref>). Furthermore, the potential redundancy in the information provided by radiomic features and clinical data may not contribute to an improvement in diagnostic performance metrics (<xref ref-type="bibr" rid="B2">2</xref>). In the studies we included, most of the studies that combined radiomics with clinical features used machine learning algorithms, while studies that relied solely on radiomics mostly adopted deep learning algorithms. Therefore, the choice of training algorithm may also influence the current results. More research is needed in the future to determine the value of multimodal AI algorithms compared to studies using radiomics alone.</p>
<p>In 2022, Xu et&#xa0;al. (<xref ref-type="bibr" rid="B1">1</xref>) performed a meta-analysis evaluating the diagnostic performance of a multimodal imaging-based AI approach incorporating CT, MRI, and ultrasound for ovarian cancer. The AI-based ultrasound diagnosis demonstrated a sensitivity of 0.91, specificity of 0.87, and AUC of 0.95. Their research compared a multimodal imaging-based AI approach incorporating CT, MRI, and ultrasound with human radiologists and found that AI outperformed the radiologists (AUC: 0.93 vs. 0.85). In 2024, Mitchell et&#xa0;al. (<xref ref-type="bibr" rid="B3">3</xref>) published another meta-analysis on AI-based ultrasound diagnosis for ovarian cancer, reporting an AI sensitivity of 0.81, specificity of 0.92, and AUC of 0.87. However, only two of the included studies compared AI with sonographers, and the diagnostic performance of sonographers was not extracted in their study. In addition, their study only performed a pooled analysis of 14 articles and did not conduct in-depth subgroup analyses based on different ultrasound equipment types or various AI algorithms.</p>
<p>To the best of our knowledge, this is the first meta-analysis to separately extract data from internal validation sets, external validation sets, and diagnostic performance data of sonographers. Compared to previously published studies, our results demonstrate higher diagnostic performance for AI models (AUC:0.98), which may be attributed to updates in new algorithms that enhance performance. Our findings also indicate that AI-based ultrasound exhibits a diagnostic performance advantage over sonographers in the initial diagnosis of ovarian cancer (AUC:0.98 vs. 0.87), which is consistent with prior findings (<xref ref-type="bibr" rid="B1">1</xref>).</p>
<p>It is worth noting that the studies included in our meta-analysis demonstrated significant heterogeneity, potentially impacting the overall sensitivity and specificity of AI performance. We conducted a meta-regression analysis, revealing that one of the primary sources of heterogeneity was the difference between analysis methods (Image-based vs. Patient-based, <italic>P</italic> = 0.01). Image-based studies can provide more detailed information, leading to higher diagnostic performance, which may result in overestimation of the accuracy of findings. This is largely due to their focus on specific anatomical structures and lesions, allowing for a nuanced analysis that can highlight minute details that might be overlooked in broader studies. In contrast, patient-based studies do not capture as much information, resulting in significant differences in design and methodology. However, the high heterogeneity may also be attributed to other factors: (1) For patient characteristics, the different stages of ovarian tumors and the age of patients may also influence the performance; (2) For intervention methods, the quality of ultrasound imaging itself, which depends on operator expertise and equipment, could influence AI diagnostic accuracy. These factors highlight the need for standardized reporting in future studies to facilitate more robust meta-analyses.</p>
<p>Our results demonstrate that AI models based on ultrasound have achieved diagnostic performance in both internal and external validation datasets, and they have the potential to become important support tools for sonographers. AI can serve as a powerful auxiliary tool, offering physicians a valuable second opinion, enhancing diagnostic reliability, and reducing the risk of missed or incorrect diagnoses. Additionally, AI has the potential to decrease unnecessary surgeries or procedures, thereby helping to lessen the clinical workload, improve diagnostic accuracy, and prevent adverse outcomes caused by missed or delayed diagnoses (<xref ref-type="bibr" rid="B32">32</xref>). Implementing ultrasound-based AI in primary healthcare systems could facilitate early detection and timely management of ovarian cancer. However, it is crucial to emphasize that these models should not be regarded as standalone diagnostic standards or decision-making tools but rather as supplementary aids for sonographers (<xref ref-type="bibr" rid="B9">9</xref>). Due to the significant differences in algorithm interpretability and the lack of standardized algorithm training for ovarian cancer diagnosis, we can only conclude that our results represent a short-term phenomenon, indicating that current AI algorithms already have the potential to outperform traditional sonographers. Notably, only five studies in our analysis compared AI diagnostic performance directly with that of sonographers, highlighting the need for further research in this area.</p>
<p>Some limitations of the current meta-analysis should be considered when interpreting the results. First, most of the included studies were retrospective in design, with only four employing a prospective approach, which may introduce potential biases. To assess the possible impact of different study designs on the outcomes, we conducted a meta-regression analysis, which revealed that the type of study did not significantly affect the results (both P &gt; 0.05). Therefore, well-designed prospective studies are essential to validate the findings of this meta-analysis and ensure the reliability and generalizability of the results. Second, only the optimal AI algorithms were selected for analysis, which may lead to an overestimation of the results. This was primarily because we aimed to avoid potential patient overlap. However, this approach may have led to an overly optimistic assessment of AI model performance. Future studies should compare the performance of different models, including those with moderate or poor outcomes, to provide a more comprehensive and objective evaluation. Third, due to lack of information, the study did not stratify sonographers by their experience and skill level. Future studies should consider stratifying sonographers by experience levels to better evaluate the impact of AI assistance on diagnostic performance across different practitioner groups. Fourth, since most of the included studies classified borderline tumors as part of the malignant (positive) group, we followed their approach and classified both borderline and malignant tumors as a positive group. However, this may have contributed to an increased rate of positive diagnoses. In the future, there is a need to evaluate AI&#x2019;s ability to distinguish between benign, borderline, and malignant ovarian tumors to aid early clinical intervention for malignant cases. 
Fifth, the very high heterogeneity (I<sup>2</sup> &gt; 97%) observed in both sensitivity and specificity, partly driven by methodological factors such as the unit of analysis, significantly limits the strength of conclusions that can be drawn regarding their current clinical readiness. Therefore, any claims about their ability to enhance diagnostic reliability, reduce workload, or improve patient outcomes in clinical practice are currently premature and require validation through more standardized, large-scale studies. Sixth, the classification of borderline tumors represents a potential source of heterogeneity. Most included studies categorized borderline tumors as malignant for the purpose of binary classification. While this reflects a common clinical challenge in distinguishing these entities from invasive cancer, it may inflate the perceived sensitivity of AI models and complicate the clinical interpretation of results, as the management of borderline tumors differs from that of frank malignancy. To address this concern, we performed a sensitivity analysis excluding studies that explicitly included borderline tumors in the malignant group. Future research should aim to evaluate AI performance across a three-tiered stratification (benign, borderline, malignant) to better assess its clinical utility in differentiating this diagnostically challenging spectrum of disease.</p>
</sec>
<sec id="s5" sec-type="conclusions">
<title>Conclusion</title>
<p>AI based on ultrasound diagnosis demonstrates excellent performance for malignant ovarian cancer detection, with potentially superior performance compared to sonographers. Despite high heterogeneity across studies and the observed publication bias, these results indicate the potential for AI integration into clinical practice. Further studies with external, multicenter prospective head-to-head design are still needed.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="data-availability">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="supplementary-material" rid="SM1"><bold>Supplementary Material</bold></xref>. Further inquiries can be directed to the corresponding author.</p></sec>
<sec id="s7" sec-type="author-contributions">
<title>Author contributions</title>
<p>RL: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Software, Writing &#x2013; original draft. JL: Conceptualization, Data curation, Formal analysis, Methodology, Writing &#x2013; original draft. XT: Conceptualization, Data curation, Formal analysis, Methodology, Writing &#x2013; original draft. SZ: Conceptualization, Data curation, Formal analysis, Methodology, Validation, Writing &#x2013; original draft. JQ: Conceptualization, Data curation, Formal analysis, Writing &#x2013; original draft. YX: Data curation, Formal analysis, Methodology, Software, Writing &#x2013; original draft. HZ: Conceptualization, Formal analysis, Funding acquisition, Methodology, Resources, Validation, Writing &#x2013; review &amp; editing.</p></sec>
<sec id="s9" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p></sec>
<sec id="s10" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declare that Generative AI was used in the creation of this manuscript. During the preparation of this work, the authors used Sider in order to improve readability and language quality. After using this tool, the authors reviewed and edited the content as needed and take full responsibility for the content of the publication.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec id="s11" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p></sec>
<sec id="s12" sec-type="supplementary-material">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fonc.2025.1626286/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fonc.2025.1626286/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="DataSheet1.docx" id="SM1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document"/>
<supplementary-material xlink:href="DataSheet2.docx" id="SM2" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document"/></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Xu</surname> <given-names>HL</given-names></name>
<name><surname>Gong</surname> <given-names>TT</given-names></name>
<name><surname>Liu</surname> <given-names>FH</given-names></name>
<name><surname>Chen</surname> <given-names>HY</given-names></name>
<name><surname>Xiao</surname> <given-names>Q</given-names></name>
<name><surname>Hou</surname> <given-names>Y</given-names></name>
<etal/>
</person-group>. 
<article-title>Artificial intelligence performance in image-based ovarian cancer identification: A systematic review and meta-analysis</article-title>. <source>EClinicalMedicine</source>. (<year>2022</year>) <volume>53</volume>:<elocation-id>101662</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.eclinm.2022.101662</pub-id>, PMID: <pub-id pub-id-type="pmid">36147628</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<label>2</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ma</surname> <given-names>L</given-names></name>
<name><surname>Huang</surname> <given-names>L</given-names></name>
<name><surname>Chen</surname> <given-names>Y</given-names></name>
<name><surname>Zhang</surname> <given-names>L</given-names></name>
<name><surname>Nie</surname> <given-names>D</given-names></name>
<name><surname>He</surname> <given-names>W</given-names></name>
<etal/>
</person-group>. 
<article-title>AI diagnostic performance based on multiple imaging modalities for ovarian tumor: A systematic review and meta-analysis</article-title>. <source>Front Oncol</source>. (<year>2023</year>) <volume>13</volume>:<elocation-id>1133491</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fonc.2023.1133491</pub-id>, PMID: <pub-id pub-id-type="pmid">37152032</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<label>3</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Mitchell</surname> <given-names>S</given-names></name>
<name><surname>Nikolopoulos</surname> <given-names>M</given-names></name>
<name><surname>El-Zarka</surname> <given-names>A</given-names></name>
<name><surname>Al-Karawi</surname> <given-names>D</given-names></name>
<name><surname>Al-Zaidi</surname> <given-names>S</given-names></name>
<name><surname>Ghai</surname> <given-names>A</given-names></name>
<etal/>
</person-group>. 
<article-title>Artificial intelligence in ultrasound diagnoses of ovarian cancer: A systematic review and meta-analysis</article-title>. <source>Cancers</source>. (<year>2024</year>) <volume>16</volume>:<fpage>422</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/cancers16020422</pub-id>, PMID: <pub-id pub-id-type="pmid">38275863</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<label>4</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Kumar</surname> <given-names>Y</given-names></name>
<name><surname>Koul</surname> <given-names>A</given-names></name>
<name><surname>Singla</surname> <given-names>R</given-names></name>
<name><surname>Ijaz</surname> <given-names>MF</given-names></name>
</person-group>. 
<article-title>Artificial intelligence in disease diagnosis: a systematic literature review, synthesizing framework and future research agenda</article-title>. <source>J Ambient Intell Humaniz Comput</source>. (<year>2023</year>) <volume>14</volume>:<page-range>8459&#x2013;86</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s12652-021-03612-z</pub-id>, PMID: <pub-id pub-id-type="pmid">35039756</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<label>5</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Bedrikovetski</surname> <given-names>S</given-names></name>
<name><surname>Dudi-Venkata</surname> <given-names>NN</given-names></name>
<name><surname>Maicas</surname> <given-names>G</given-names></name>
<name><surname>Kroon</surname> <given-names>HM</given-names></name>
<name><surname>Seow</surname> <given-names>W</given-names></name>
<name><surname>Carneiro</surname> <given-names>G</given-names></name>
<etal/>
</person-group>. 
<article-title>Artificial intelligence for the diagnosis of lymph node metastases in patients with abdominopelvic Malignancy: A systematic review and meta-analysis</article-title>. <source>Artif Intell Med</source>. (<year>2021</year>) <volume>113</volume>:<elocation-id>102022</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.artmed.2021.102022</pub-id>, PMID: <pub-id pub-id-type="pmid">33685585</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<label>6</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Campbell</surname> <given-names>S</given-names></name>
<name><surname>Gentry-Maharaj</surname> <given-names>A</given-names></name>
</person-group>. 
<article-title>The role of transvaginal ultrasound in screening for ovarian cancer</article-title>. <source>Climacteric</source>. (<year>2018</year>) <volume>21</volume>:<page-range>221&#x2013;6</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1080/13697137.2018.1433656</pub-id>, PMID: <pub-id pub-id-type="pmid">29490504</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<label>7</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wang</surname> <given-names>ZM</given-names></name>
<name><surname>Luo</surname> <given-names>SY</given-names></name>
<name><surname>Chen</surname> <given-names>J</given-names></name>
<name><surname>Jiao</surname> <given-names>Y</given-names></name>
<name><surname>Cui</surname> <given-names>C</given-names></name>
<name><surname>Shi</surname> <given-names>SY</given-names></name>
<etal/>
</person-group>. 
<article-title>Multi-modality deep learning model reaches high prediction accuracy in the diagnosis of ovarian cancer</article-title>. <source>Iscience</source>. (<year>2024</year>) <volume>27</volume>:<fpage>109403</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.isci.2024.109403</pub-id>, PMID: <pub-id pub-id-type="pmid">38523785</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<label>8</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Sogani</surname> <given-names>J</given-names></name>
<name><surname>Allen</surname> <given-names>B</given-names> <suffix>Jr.</suffix></name>
<name><surname>Dreyer</surname> <given-names>K</given-names></name>
<name><surname>McGinty</surname> <given-names>G</given-names></name>
</person-group>. 
<article-title>Artificial intelligence in radiology: the ecosystem essential to improving patient care</article-title>. <source>Clin Imaging</source>. (<year>2020</year>) <volume>59</volume>:<fpage>A3</fpage>&#x2013;<lpage>a6</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.clinimag.2019.08.001</pub-id>, PMID: <pub-id pub-id-type="pmid">31481284</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<label>9</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Gao</surname> <given-names>Y</given-names></name>
<name><surname>Zeng</surname> <given-names>SQ</given-names></name>
<name><surname>Xu</surname> <given-names>XY</given-names></name>
<name><surname>Li</surname> <given-names>HY</given-names></name>
<name><surname>Yao</surname> <given-names>SZ</given-names></name>
<name><surname>Song</surname> <given-names>K</given-names></name>
<etal/>
</person-group>. 
<article-title>Deep learning-enabled pelvic ultrasound images for accurate diagnosis of ovarian cancer in China: a retrospective, multicentre, diagnostic study</article-title>. <source>Lancet Digital Health</source>. (<year>2022</year>) <volume>4</volume>:<page-range>E179&#x2013;E87</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/S2589-7500(21)00278-8</pub-id>, PMID: <pub-id pub-id-type="pmid">35216752</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<label>10</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Moher</surname> <given-names>D</given-names></name>
<name><surname>Liberati</surname> <given-names>A</given-names></name>
<name><surname>Tetzlaff</surname> <given-names>J</given-names></name>
<name><surname>Altman</surname> <given-names>DG</given-names></name>
</person-group>. 
<article-title>Preferred reporting items for systematic reviews and meta-analyses: the PRISMA statement</article-title>. <source>PloS Med</source>. (<year>2009</year>) <volume>6</volume>:<elocation-id>e1000097</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1371/journal.pmed.1000097</pub-id>, PMID: <pub-id pub-id-type="pmid">19621072</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<label>11</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>McInnes</surname> <given-names>MDF</given-names></name>
<name><surname>Moher</surname> <given-names>D</given-names></name>
<name><surname>Thombs</surname> <given-names>BD</given-names></name>
<name><surname>McGrath</surname> <given-names>TA</given-names></name>
<name><surname>Bossuyt</surname> <given-names>PM</given-names></name>
<name><surname>Clifford</surname> <given-names>T</given-names></name>
<etal/>
</person-group>. 
<article-title>Preferred reporting items for a systematic review and meta-analysis of diagnostic test accuracy studies: the PRISMA-DTA statement</article-title>. <source>JAMA</source>. (<year>2018</year>) <volume>319</volume>:<page-range>388&#x2013;96</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1001/jama.2017.19163</pub-id>, PMID: <pub-id pub-id-type="pmid">29362800</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<label>12</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Whiting</surname> <given-names>PF</given-names></name>
<name><surname>Rutjes</surname> <given-names>AW</given-names></name>
<name><surname>Westwood</surname> <given-names>ME</given-names></name>
<name><surname>Mallett</surname> <given-names>S</given-names></name>
<name><surname>Deeks</surname> <given-names>JJ</given-names></name>
<name><surname>Reitsma</surname> <given-names>JB</given-names></name>
<etal/>
</person-group>. 
<article-title>QUADAS-2: a revised tool for the quality assessment of diagnostic accuracy studies</article-title>. <source>Ann Intern Med</source>. (<year>2011</year>) <volume>155</volume>:<page-range>529&#x2013;36</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.7326/0003-4819-155-8-201110180-00009</pub-id>, PMID: <pub-id pub-id-type="pmid">22007046</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<label>13</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wolff</surname> <given-names>RF</given-names></name>
<name><surname>Moons</surname> <given-names>KGM</given-names></name>
<name><surname>Riley</surname> <given-names>RD</given-names></name>
<name><surname>Whiting</surname> <given-names>PF</given-names></name>
<name><surname>Westwood</surname> <given-names>M</given-names></name>
<name><surname>Collins</surname> <given-names>GS</given-names></name>
<etal/>
</person-group>. 
<article-title>PROBAST: A tool to assess the risk of bias and applicability of prediction model studies</article-title>. <source>Ann Intern Med</source>. (<year>2019</year>) <volume>170</volume>:<page-range>51&#x2013;8</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.7326/m18-1376</pub-id>, PMID: <pub-id pub-id-type="pmid">30596875</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<label>14</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Arends</surname> <given-names>L</given-names></name>
<name><surname>Hamza</surname> <given-names>T</given-names></name>
<name><surname>Van Houwelingen</surname> <given-names>J</given-names></name>
<name><surname>Heijenbrok-Kal</surname> <given-names>M</given-names></name>
<name><surname>Hunink</surname> <given-names>M</given-names></name>
<name><surname>Stijnen</surname> <given-names>T</given-names></name>
</person-group>. 
<article-title>Bivariate random effects meta-analysis of ROC curves</article-title>. <source>Med Decision Making</source>. (<year>2008</year>) <volume>28</volume>:<page-range>621&#x2013;38</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1177/0272989X08319957</pub-id>, PMID: <pub-id pub-id-type="pmid">18591542</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<label>15</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Higgins</surname> <given-names>JP</given-names></name>
<name><surname>Thompson</surname> <given-names>SG</given-names></name>
</person-group>. 
<article-title>Quantifying heterogeneity in a meta-analysis</article-title>. <source>Stat Med</source>. (<year>2002</year>) <volume>21</volume>:<page-range>1539&#x2013;58</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/sim.1186</pub-id>, PMID: <pub-id pub-id-type="pmid">12111919</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<label>16</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Deeks</surname> <given-names>JJ</given-names></name>
<name><surname>Macaskill</surname> <given-names>P</given-names></name>
<name><surname>Irwig</surname> <given-names>L</given-names></name>
</person-group>. 
<article-title>The performance of tests of publication bias and other sample size effects in systematic reviews of diagnostic test accuracy was assessed</article-title>. <source>J Clin Epidemiol</source>. (<year>2005</year>) <volume>58</volume>:<page-range>882&#x2013;93</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.jclinepi.2005.01.016</pub-id>, PMID: <pub-id pub-id-type="pmid">16085191</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<label>17</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Li</surname> <given-names>JA</given-names></name>
<name><surname>Chen</surname> <given-names>YX</given-names></name>
<name><surname>Zhang</surname> <given-names>MY</given-names></name>
<name><surname>Zhang</surname> <given-names>PF</given-names></name>
<name><surname>He</surname> <given-names>KL</given-names></name>
<name><surname>Yan</surname> <given-names>FQ</given-names></name>
<etal/>
</person-group>. 
<article-title>A deep learning model system for diagnosis and management of adnexal masses</article-title>. <source>Cancers</source>. (<year>2022</year>) <volume>14</volume>:<fpage>5291</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/cancers14215291</pub-id>, PMID: <pub-id pub-id-type="pmid">36358710</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<label>18</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Alc&#xe1;zar</surname> <given-names>JL</given-names></name>
<name><surname>Errasti</surname> <given-names>T</given-names></name>
<name><surname>Laparte</surname> <given-names>C</given-names></name>
<name><surname>Jurado</surname> <given-names>M</given-names></name>
<name><surname>L&#xf3;pez-Garc&#xed;a</surname> <given-names>G</given-names></name>
</person-group>. 
<article-title>Assessment of a new logistic model in the preoperative evaluation of adnexal masses</article-title>. <source>J Ultrasound Med</source>. (<year>2001</year>) <volume>20</volume>:<page-range>841&#x2013;8</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.7863/jum.2001.20.8.841</pub-id>, PMID: <pub-id pub-id-type="pmid">11503920</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<label>19</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Szpurek</surname> <given-names>D</given-names></name>
<name><surname>Moszynski</surname> <given-names>R</given-names></name>
<name><surname>Smolen</surname> <given-names>A</given-names></name>
<name><surname>Sajdak</surname> <given-names>S</given-names></name>
</person-group>. 
<article-title>Artificial neural network computer prediction of ovarian Malignancy in women with adnexal masses</article-title>. <source>Int J Gynaecol Obstet</source>. (<year>2005</year>) <volume>89</volume>:<page-range>108&#x2013;13</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ijgo.2005.01.034</pub-id>, PMID: <pub-id pub-id-type="pmid">15847872</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<label>20</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Timmerman</surname> <given-names>D</given-names></name>
<name><surname>Verrelst</surname> <given-names>H</given-names></name>
<name><surname>Bourne</surname> <given-names>TH</given-names></name>
<name><surname>De Moor</surname> <given-names>B</given-names></name>
<name><surname>Collins</surname> <given-names>WP</given-names></name>
<name><surname>Vergote</surname> <given-names>I</given-names></name>
<etal/>
</person-group>. 
<article-title>Artificial neural network models for the preoperative discrimination between Malignant and benign adnexal masses</article-title>. <source>Ultrasound Obstet Gynecol</source>. (<year>1999</year>) <volume>13</volume>:<fpage>17</fpage>&#x2013;<lpage>25</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1046/j.1469-0705.1999.13010017.x</pub-id>, PMID: <pub-id pub-id-type="pmid">10201082</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<label>21</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Chen</surname> <given-names>H</given-names></name>
<name><surname>Yang</surname> <given-names>BW</given-names></name>
<name><surname>Qian</surname> <given-names>L</given-names></name>
<name><surname>Meng</surname> <given-names>YS</given-names></name>
<name><surname>Bai</surname> <given-names>XH</given-names></name>
<name><surname>Hong</surname> <given-names>XW</given-names></name>
<etal/>
</person-group>. 
<article-title>Deep learning prediction of ovarian Malignancy at US compared with O-RADS and expert assessment</article-title>. <source>Radiology</source>. (<year>2022</year>) <volume>304</volume>:<page-range>106&#x2013;13</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1148/radiol.211367</pub-id>, PMID: <pub-id pub-id-type="pmid">35412367</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<label>22</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Van Holsbeke</surname> <given-names>C</given-names></name>
<name><surname>Van Calster</surname> <given-names>B</given-names></name>
<name><surname>Valentin</surname> <given-names>L</given-names></name>
<name><surname>Testa</surname> <given-names>AC</given-names></name>
<name><surname>Ferrazzi</surname> <given-names>E</given-names></name>
<name><surname>Dimou</surname> <given-names>I</given-names></name>
<etal/>
</person-group>. 
<article-title>External validation of mathematical models to distinguish between benign and Malignant adnexal tumors: A multicenter study by the International Ovarian Tumor Analysis group</article-title>. <source>Clin Cancer Res</source>. (<year>2007</year>) <volume>13</volume>:<page-range>4440&#x2013;7</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1158/1078-0432.CCR-06-2958</pub-id>, PMID: <pub-id pub-id-type="pmid">17671128</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<label>23</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Deeparani</surname> <given-names>M</given-names></name>
<name><surname>Kalamani</surname> <given-names>M</given-names></name>
</person-group>. 
<article-title>Gynecological healthcare: unveiling pelvic masses classification through evolutionary gravitational neocognitron neural network optimized with nomadic people optimizer</article-title>. <source>Diagnostics</source>. (<year>2023</year>) <volume>13</volume>:<fpage>3131</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/diagnostics13193131</pub-id>, PMID: <pub-id pub-id-type="pmid">37835875</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<label>24</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Vaes</surname> <given-names>E</given-names></name>
<name><surname>Manchanda</surname> <given-names>R</given-names></name>
<name><surname>Nir</surname> <given-names>R</given-names></name>
<name><surname>Nir</surname> <given-names>D</given-names></name>
<name><surname>Bleiberg</surname> <given-names>H</given-names></name>
<name><surname>Autier</surname> <given-names>P</given-names></name>
<etal/>
</person-group>. 
<article-title>Mathematical models to discriminate between benign and Malignant adnexal masses: potential diagnostic improvement using ovarian HistoScanning</article-title>. <source>Int J Gynecol Cancer</source>. (<year>2010</year>) <volume>21</volume>:<fpage>35</fpage>&#x2013;<lpage>43</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1097/IGC.0b013e3182000528</pub-id>, PMID: <pub-id pub-id-type="pmid">21330829</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<label>25</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Van Holsbeke</surname> <given-names>C</given-names></name>
<name><surname>Van Calster</surname> <given-names>B</given-names></name>
<name><surname>Testa</surname> <given-names>AC</given-names></name>
<name><surname>Domali</surname> <given-names>E</given-names></name>
<name><surname>Lu</surname> <given-names>C</given-names></name>
<name><surname>Van Huffel</surname> <given-names>S</given-names></name>
<etal/>
</person-group>. 
<article-title>Prospective internal validation of mathematical models to predict Malignancy in adnexal masses: Results from the international ovarian tumor analysis study</article-title>. <source>Clin Cancer Res</source>. (<year>2009</year>) <volume>15</volume>:<page-range>684&#x2013;91</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1158/1078-0432.CCR-08-0113</pub-id>, PMID: <pub-id pub-id-type="pmid">19147775</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<label>26</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Moro</surname> <given-names>F</given-names></name>
<name><surname>Vagni</surname> <given-names>M</given-names></name>
<name><surname>Tran</surname> <given-names>HE</given-names></name>
<name><surname>Bernardini</surname> <given-names>F</given-names></name>
<name><surname>Mascilini</surname> <given-names>F</given-names></name>
<name><surname>Ciccarone</surname> <given-names>F</given-names></name>
<etal/>
</person-group>. 
<article-title>Radiomics analysis of ultrasound images to discriminate between benign and Malignant adnexal masses with solid ultrasound morphology</article-title>. <source>Ultrasound Obstet Gynecol</source>. (<year>2024</year>) <volume>65</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/uog.27680</pub-id>, PMID: <pub-id pub-id-type="pmid">38748935</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<label>27</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Choi</surname> <given-names>YJ</given-names></name>
<name><surname>Jung</surname> <given-names>Y</given-names></name>
</person-group>. 
<article-title>Ovarian tumor diagnosis using deep convolutional neural networks and denoising convolutional autoencoder</article-title>. <source>Cancer Res</source>. (<year>2022</year>) <volume>82</volume>:<fpage>1925</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1158/1538-7445.AM2022-1925</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<label>28</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>&#x218;tefan</surname> <given-names>PA</given-names></name>
<name><surname>Lupean</surname> <given-names>RA</given-names></name>
<name><surname>Mihu</surname> <given-names>CM</given-names></name>
<name><surname>Lebovici</surname> <given-names>A</given-names></name>
<name><surname>Oancea</surname> <given-names>MD</given-names></name>
<name><surname>H&#xee;&#x21b;u</surname> <given-names>L</given-names></name>
<etal/>
</person-group>. 
<article-title>Ultrasonography in the diagnosis of adnexal lesions: The role of texture analysis</article-title>. <source>Diagnostics</source>. (<year>2021</year>) <volume>11</volume>:<fpage>812</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/diagnostics11050812</pub-id>, PMID: <pub-id pub-id-type="pmid">33947150</pub-id>
</mixed-citation>
</ref>
<ref id="B29">
<label>29</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Amidi</surname> <given-names>E</given-names></name>
<name><surname>Mostafa</surname> <given-names>A</given-names></name>
<name><surname>Nandy</surname> <given-names>S</given-names></name>
<name><surname>Yang</surname> <given-names>G</given-names></name>
<name><surname>Middleton</surname> <given-names>W</given-names></name>
<name><surname>Siegel</surname> <given-names>C</given-names></name>
<etal/>
</person-group>. 
<article-title>Classification of human ovarian cancer using functional, spectral, and imaging features obtained from <italic>in vivo</italic> photoacoustic imaging</article-title>. <source>Biomed Optics Express</source>. (<year>2019</year>) <volume>10</volume>:<page-range>2303&#x2013;17</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1364/BOE.10.002303</pub-id>, PMID: <pub-id pub-id-type="pmid">31149374</pub-id>
</mixed-citation>
</ref>
<ref id="B30">
<label>30</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Lin</surname> <given-names>YX</given-names></name>
<name><surname>Zhu</surname> <given-names>Q</given-names></name>
</person-group>. 
<article-title>Classification and risk assessment of ovarian-adnexal lesions using parametric and radiomic analysis of co-registered ultrasound-photoacoustic tomographic images</article-title>. <source>Photoacoustics</source>. (<year>2024</year>) <volume>41</volume>:<fpage>100675</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.pacs.2024.100675</pub-id>, PMID: <pub-id pub-id-type="pmid">39717671</pub-id>
</mixed-citation>
</ref>
<ref id="B31">
<label>31</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wang</surname> <given-names>H</given-names></name>
<name><surname>Liu</surname> <given-names>C</given-names></name>
<name><surname>Zhao</surname> <given-names>Z</given-names></name>
<name><surname>Zhang</surname> <given-names>C</given-names></name>
<name><surname>Wang</surname> <given-names>X</given-names></name>
<name><surname>Li</surname> <given-names>H</given-names></name>
<etal/>
</person-group>. 
<article-title>Application of deep convolutional neural networks for discriminating benign, borderline, and malignant serous ovarian tumors from ultrasound images</article-title>. <source>Front Oncol</source>. (<year>2021</year>) <volume>11</volume>:<elocation-id>770683</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fonc.2021.770683</pub-id>, PMID: <pub-id pub-id-type="pmid">34988015</pub-id>
</mixed-citation>
</ref>
<ref id="B32">
<label>32</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Acharya</surname> <given-names>UR</given-names></name>
</person-group>. 
<article-title>Evolutionary algorithm-based classifier parameter tuning for automatic ovarian cancer tissue characterization and classification</article-title>. <source>Ultraschall in der Med (Stuttgart Germany: 1980)</source>. (<year>2012</year>) <volume>35</volume>:<page-range>237&#x2013;45</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1055/s-0032-1330336</pub-id>, PMID: <pub-id pub-id-type="pmid">23258769</pub-id>
</mixed-citation>
</ref>
<ref id="B33">
<label>33</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Moro</surname> <given-names>F</given-names></name>
<name><surname>Ciancia</surname> <given-names>M</given-names></name>
<name><surname>Zace</surname> <given-names>D</given-names></name>
<name><surname>Vagni</surname> <given-names>M</given-names></name>
<name><surname>Tran</surname> <given-names>HE</given-names></name>
<name><surname>Giudice</surname> <given-names>MT</given-names></name>
<etal/>
</person-group>. 
<article-title>Role of artificial intelligence applied to ultrasound in gynecology oncology: A systematic review</article-title>. <source>Int J Cancer</source>. (<year>2024</year>) <volume>155</volume>:<page-range>1832&#x2013;45</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/ijc.35092</pub-id>, PMID: <pub-id pub-id-type="pmid">38989809</pub-id>
</mixed-citation>
</ref>
<ref id="B34">
<label>34</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhang</surname> <given-names>YP</given-names></name>
<name><surname>Zhang</surname> <given-names>XY</given-names></name>
<name><surname>Cheng</surname> <given-names>YT</given-names></name>
<name><surname>Li</surname> <given-names>B</given-names></name>
<name><surname>Teng</surname> <given-names>XZ</given-names></name>
<name><surname>Zhang</surname> <given-names>J</given-names></name>
<etal/>
</person-group>. 
<article-title>Artificial intelligence-driven radiomics study in cancer: the role of feature engineering and modeling</article-title>. <source>Mil Med Res</source>. (<year>2023</year>) <volume>10</volume>:<fpage>22</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s40779-023-00458-8</pub-id>, PMID: <pub-id pub-id-type="pmid">37189155</pub-id>
</mixed-citation>
</ref>
<ref id="B35">
<label>35</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Kumar</surname> <given-names>V</given-names></name>
<name><surname>Gu</surname> <given-names>Y</given-names></name>
<name><surname>Basu</surname> <given-names>S</given-names></name>
<name><surname>Berglund</surname> <given-names>A</given-names></name>
<name><surname>Eschrich</surname> <given-names>SA</given-names></name>
<name><surname>Schabath</surname> <given-names>MB</given-names></name>
<etal/>
</person-group>. 
<article-title>Radiomics: the process and the challenges</article-title>. <source>Magn Reson Imaging</source>. (<year>2012</year>) <volume>30</volume>:<page-range>1234&#x2013;48</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.mri.2012.06.010</pub-id>, PMID: <pub-id pub-id-type="pmid">22898692</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2118129">Rohan Gupta</ext-link>, University of South Carolina, United States</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1465084">Faezeh Aghajani</ext-link>, Boston Children&#x2019;s Hospital and Harvard Medical School, United States</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3152724">Giulia Parpinel</ext-link>, Azienda Sanitaria Locale di Biella, Italy</p></fn>
</fn-group>
</back>
</article>