<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="review-article" dtd-version="2.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Oncol.</journal-id>
<journal-title>Frontiers in Oncology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Oncol.</abbrev-journal-title>
<issn pub-type="epub">2234-943X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fonc.2023.1133491</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Oncology</subject>
<subj-group>
<subject>Review</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>AI diagnostic performance based on multiple imaging modalities for ovarian tumor: A systematic review and meta-analysis</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Ma</surname><given-names>Lin</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2154808"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Huang</surname><given-names>Liqiong</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Chen</surname><given-names>Yan</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zhang</surname><given-names>Lei</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Nie</surname><given-names>Dunli</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>He</surname><given-names>Wenjing</given-names>
</name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Qi</surname><given-names>Xiaoxue</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn001"><sup>*</sup></xref>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Department of Obstetrics and Gynecology, Chengdu First People's Hospital</institution>, <addr-line>Chengdu</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>Department of Ultrasound, Chengdu First People's Hospital</institution>, <addr-line>Chengdu</addr-line>, <country>China</country></aff>
<aff id="aff3"><sup>3</sup><institution>Big Data Research Center, University of Electronic Science and Technology of China</institution>, <addr-line>Chengdu</addr-line>, <country>China</country></aff>
<author-notes>
<fn fn-type="edited-by">
<p>Edited by: Paolo Scollo, Kore University of Enna, Italy</p>
</fn>
<fn fn-type="edited-by">
<p>Reviewed by: Giancarlo Conoscenti, Azienda Ospedaliera per l&#x2019;Emergenza Cannizzaro, Italy; Elsa Viora, University Hospital of the City of Health and Science of Turin, Italy</p>
</fn>
<fn fn-type="corresp" id="fn001">
<p>*Correspondence: Xiaoxue Qi, <email xlink:href="mailto:920950143@qq.com">920950143@qq.com</email>
</p>
</fn>
<fn fn-type="other" id="fn002">
<p>This article was submitted to Gynecological Oncology, a section of the journal Frontiers in Oncology</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>21</day>
<month>04</month>
<year>2023</year>
</pub-date>
<pub-date pub-type="collection">
<year>2023</year>
</pub-date>
<volume>13</volume>
<elocation-id>1133491</elocation-id>
<history>
<date date-type="received">
<day>29</day>
<month>12</month>
<year>2022</year>
</date>
<date date-type="accepted">
<day>30</day>
<month>01</month>
<year>2023</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2023 Ma, Huang, Chen, Zhang, Nie, He and Qi</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Ma, Huang, Chen, Zhang, Nie, He and Qi</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<sec>
<title>Background</title>
<p>In recent years, AI has been applied to disease diagnosis in many medical and engineering research studies. We aimed to explore the diagnostic performance of models based on different imaging modalities for ovarian cancer.</p>
</sec>
<sec>
<title>Methods</title>
<p>PubMed, EMBASE, Web of Science, and Wanfang Database were searched. The search scope was all published Chinese- and English-language literature on AI diagnosis of benign and malignant ovarian tumors. The literature was screened and data were extracted according to inclusion and exclusion criteria. QUADAS-2 was used to evaluate the quality of the included literature, STATA 17.0 was used for statistical analysis, and forest plots and funnel plots were drawn to visualize the study results.</p>
</sec>
<sec>
<title>Results</title>
<p>A total of 11 studies were included, 3 of them were modeled based on ultrasound, 6 based on MRI, and 2 based on CT. The pooled AUROCs of studies based on ultrasound, MRI and CT were 0.94 (95% CI 0.88-1.00), 0.82 (95% CI 0.71-0.93) and 0.82 (95% CI 0.78-0.86), respectively. The values of I<sup>2</sup> were 99.92%, 99.91% and 92.64% based on ultrasound, MRI and CT. Funnel plot suggested no publication bias.</p>
</sec>
<sec>
<title>Conclusion</title>
<p>The models based on ultrasound have the best performance in the diagnosis of ovarian cancer.</p>
</sec>
</abstract>
<kwd-group>
<kwd>ovarian cancer</kwd>
<kwd>AI</kwd>
<kwd>ultrasound</kwd>
<kwd>meta-analysis</kwd>
<kwd>systematic review</kwd>
</kwd-group>
<counts>
<fig-count count="4"/>
<table-count count="3"/>
<equation-count count="0"/>
<ref-count count="31"/>
<page-count count="7"/>
<word-count count="2721"/>
</counts>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<title>Introduction</title>
<p>Ovarian cancer (OC) is the malignant tumor with the highest death rate in the female reproductive system, with a high incidence rate and mortality (<xref ref-type="bibr" rid="B1">1</xref>). Among gynecological tumors, the incidence rate ranks third and the mortality ranks first, surpassing cervical cancer and endometrial cancer, posing a serious threat to the health of women (<xref ref-type="bibr" rid="B2">2</xref>). Unnecessary surgery leads to reduced fertility, therefore, accurate preoperative assessment of the risk of malignancy can help physicians provide individualized treatment for patients (<xref ref-type="bibr" rid="B3">3</xref>).</p>
<p>At present, the diagnosis of OC mainly relies on pathological examination and medical imaging techniques which can assist in the diagnosis and treatment of OC (<xref ref-type="bibr" rid="B4">4</xref>). However, due to the insidious nature of OC, the physician&#x2019;s visual inspection of medical images can&#x2019;t provide enough information to personalize the treatment for the patient (<xref ref-type="bibr" rid="B5">5</xref>). Artificial intelligence (AI) can automatically recognize complex patterns in imaging data, extract potential information from medical images, and provide quantitative assessment of radiographic characteristics (<xref ref-type="bibr" rid="B6">6</xref>). The application of artificial intelligence in medical imaging mainly includes two categories of radiomics and deep learning (<xref ref-type="bibr" rid="B7">7</xref>). This non-invasive approach reduces patient pain and helps physicians personalize treatment for patients.</p>
<p>For OC patients, proper and accurate preoperative imaging is very important for the treatment of cancer. Ultrasound is mainly used for early screening of ovarian cancer (<xref ref-type="bibr" rid="B8">8</xref>). The computed tomography (CT) imaging is the standard for preoperative evaluation of patients with OC; magnetic resonance imaging (MRI) focuses on imaging small peritoneal deposits in difficult-to-resect areas (<xref ref-type="bibr" rid="B9">9</xref>). To our knowledge, a recent study conducted a Meta-analysis of studies that used AI for the early prediction of different kinds of diseases, demonstrating the important role of AI in disease diagnosis (<xref ref-type="bibr" rid="B10">10</xref>). Another study evaluated the diagnostic performance of artificial intelligence for lymph node metastasis in abdominopelvic malignancies and found that the diagnostic ability of artificial intelligence was higher than the subjective judgment of physicians (<xref ref-type="bibr" rid="B11">11</xref>). However, these studies have ignored the impact of different imaging modalities on artificial intelligence diagnostic results.</p>
<p>The purpose of this study was to conduct a systematic review and meta-analysis of published data on ovarian cancer to assess the accuracy of artificial intelligence in the application of multiple imaging modalities for OC.</p>
</sec>
<sec id="s2">
<title>Methods</title>
<sec id="s2_1">
<title>Search strategy</title>
<p>In this study, the Preferred Reporting Item of the Guidelines for Systematic Reviews and Meta-Analysis (PRISMA) was used as the search rule (<xref ref-type="bibr" rid="B12">12</xref>), and the databases used for the search were PubMed, EMBASE, Web of Science, and Wanfang Database. <xref ref-type="table" rid="T1"><bold>Table&#xa0;1</bold></xref> shows the method of search. The search was conducted using subject terms including &#x201c;radiomics,&#x201d; &#x201c;deep learning,&#x201d; &#x201c;Artificial intelligence,&#x201d; &#x201c;ovarian cancer,&#x201d; and &#x201c;malignant ovarian tumors&#x201d;. The results of different queries were combined using the Boolean operator AND. Any eligible studies were considered preliminary search results. To obtain all relevant literature, we also searched the reference lists of relevant studies by manual search.</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Search Strategy.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Sources</th>
<th valign="middle" align="center">Search in</th>
<th valign="middle" align="center">MeSH terms</th>
<th valign="middle" align="center">Limits</th>
<th valign="middle" align="center">Search results</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Web of Science</td>
<td valign="middle" align="left">Search manager</td>
<td valign="middle" align="left">(&#x201c;Artificial intelligence&#x201d; OR&#x201d;AI&#x201d; OR&#x201d;deep learning&#x201d; OR &#x201c;machine learning&#x201d; OR &#x201c;radiomics&#x201d; OR &#x201c;radiomic&#x201d;) AND (&#x201c;CT&#x201d; OR &#x201c;MRI&#x201d;OR &#x201c;ultrasound&#x201d;) AND (&#x201c;ovarian cancer&#x201d; OR &#x201c; malignant ovarian tumors&#x201d; OR &#x201c; OC &#x201c;)</td>
<td valign="middle" align="left">None</td>
<td valign="middle" align="center">5</td>
</tr>
<tr>
<td valign="middle" align="left">PubMed, (MEDLINE)</td>
<td valign="middle" align="left">N/A</td>
<td valign="middle" align="left">(&#x201c;Artificial intelligence&#x201d; OR&#x201d;AI&#x201d; OR&#x201d;deep learning&#x201d; OR &#x201c;machine learning&#x201d; OR &#x201c;radiomics&#x201d; OR &#x201c;radiomic&#x201d;) AND (&#x201c;CT&#x201d; OR &#x201c;MRI&#x201d;OR &#x201c;ultrasound&#x201d;) AND (&#x201c;ovarian tumors&#x201d; OR &#x201c;benign and malignant ovarian tumors&#x201d;) AND (&#x201c;ovarian cancer&#x201d; OR &#x201c; OC &#x201c;)</td>
<td valign="middle" align="left">None</td>
<td valign="middle" align="center">29</td>
</tr>
<tr>
<td valign="middle" align="left">EMBASE</td>
<td valign="middle" align="left">Quick search</td>
<td valign="middle" align="left">(&#x2018;Artificial intelligence&#x2019;/exp OR &#x2018;Artificial intelligence&#x2019; OR &#x2018;AI&#x2019;/exp OR &#x2018;AI&#x2019;OR &#x2018;machine learning&#x2019;/exp OR &#x2018;machine learning&#x2019; OR &#x2018;radiomics&#x2019;/exp OR &#x2018;radiomics&#x2019; OR &#x2018;radiomic&#x2019;) AND (&#x2018;ct&#x2019;/exp OR &#x2018;ct&#x2019; OR &#x2018;mri&#x2019;/exp OR &#x2018;mri&#x2019;OR &#x2018;ultrasound&#x2019;/exp OR &#x2018;ultrasound&#x2019;) AND (&#x2018;ovarian tumors&#x2019;/exp OR &#x2018;ovarian tumors&#x2019; OR &#x2018;benign and malignant ovarian tumors&#x2019;/exp OR &#x2018;benign and malignant ovarian tumors&#x2019;) AND (&#x2018;ovarian cancer&#x2019;/exp OR &#x2018; OC &#x2018;)</td>
<td valign="middle" align="left">None</td>
<td valign="middle" align="center">30</td>
</tr>
<tr>
<td valign="middle" align="left">Wanfang Database</td>
<td valign="middle" align="left">N/A</td>
<td valign="middle" align="left">(&#x201c;Artificial intelligence&#x201d; OR&#x201d;AI&#x201d; OR&#x201d;deep learning&#x201d; OR &#x201c;machine learning&#x201d; OR &#x201c;radiomics&#x201d; OR &#x201c;radiomic&#x201d;) AND (&#x201c;CT&#x201d; OR &#x201c;MRI&#x201d;OR &#x201c;ultrasound&#x201d;) AND (&#x201c;ovarian tumors&#x201d; OR &#x201c;malignant ovarian tumors&#x201d;) AND (&#x201c;ovarian cancer&#x201d; OR &#x201c; OC &#x201c;)</td>
<td valign="middle" align="left">None</td>
<td valign="middle" align="center">7</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>N/A, Not Applicable.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s2_2">
<title>Study selection</title>
<p>The inclusion and exclusion criteria for studies were as follows. Inclusion criteria: (1) retrospective or prospective studies evaluating the diagnostic efficacy of AI in identifying ovarian tumors of patients; and (2) patients with ovarian tumor. Exclusion criteria: (1) animal studies, case reports, conference literature; (2) insufficient computable data; and (3) duplicate reports or studies based on the same data. Two researchers used Covidence software to screen studies and identified titles and abstracts. Disagreements in the process of study screening were arbitrated and agreed upon by a third author.</p>
</sec>
<sec id="s2_3">
<title>Data extraction</title>
<p>Data were extracted from all eligible studies, and the information extracted included: first author, country, year of publication, type of AI model, number of patients, age of patients, type of tumor, type of study and imaging modality. The area under the receiver operating characteristic curve (AUROC), sensitivity (SEN), specificity (SPE), and accuracy are used to evaluate the performance of the models, with AUROC being considered the most important metric. The data we extracted were used for data processing and forest plot production.</p>
</sec>
<sec id="s2_4">
<title>Quality assessment</title>
<p>The Quality Assessment of Diagnostic Accuracy Studies Scale (QUADAS-2) was used to assess the risk of bias of included studies (<xref ref-type="bibr" rid="B13">13</xref>). First, the two researchers responded to each study&#x2019;s signaling questions using three options: &#x201c;yes&#x201d;, &#x201c;no&#x201d;, and &#x201c;uncertain&#x201d;. Then the third researcher used the QUADAS-2 to rate the risk of bias into three categories, &#x201c;low,&#x201d; &#x201c;high,&#x201d; or &#x201c;uncertain&#x201d;.</p>
</sec>
<sec id="s2_5">
<title>Statistical analysis</title>
<p>Meta-analysis of the included literature was implemented in this study using STATA 17.0. If the study population is divided into training and test sets, only the test set data are included as metrics. If multiple models were used simultaneously in a given study, we only select the model with the median AUROC value. Continuous variables were described using mean difference (MD) as well as 95% confidence interval (CI), and were considered statistically significant when P&lt;0.05. Heterogeneity was assessed according to discordance index (I<sup>2</sup>) (<xref ref-type="bibr" rid="B14">14</xref>). If I<sup>2</sup>&lt;50%, it indicated low heterogeneity of Meta-analysis results and a fixed-effect model could be selected. On the contrary, if I<sup>2</sup>&#x2267;50%, that indicated high heterogeneity of Meta-analysis results and a random-effect model could be selected. Funnel plots and Egger tests (<xref ref-type="bibr" rid="B15">15</xref>) were used to assess whether there was publication bias in the results of Meta-analysis. When publication bias existed, the results of Meta-analysis were further analyzed for stability and reliability using the trim-and-fill method. In addition, sensitivity analysis was used to assess the robustness of the results of Meta-analysis. Sensitivity analyses excluding one study at a time were conducted to clarify whether the results were driven by one large study or a study with an extreme result.</p>
</sec>
</sec>
<sec id="s3" sec-type="results">
<title>Results</title>
<sec id="s3_1">
<title>Study selection</title>
<p>In total, 71 studies were identified after removing duplicates, but 20 studies with non-compliant titles and abstracts were excluded. After full-text screening of the remaining 51 studies, only 26 studies met the requirements, but 15 of them had insufficient data, and the remaining 11 studies were used in our Meta-analysis (<xref ref-type="bibr" rid="B16">16</xref>&#x2013;<xref ref-type="bibr" rid="B25">25</xref>). <xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1</bold></xref> shows the selection process of our study.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>Study selection process.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-13-1133491-g001.tif"/>
</fig>
</sec>
<sec id="s3_2">
<title>Study characteristics</title>
<p>We finally selected 11 studies for meta-analysis, and the characteristics of each study are summarized in <xref ref-type="table" rid="T2"><bold>Table&#xa0;2</bold></xref>. All of the studies we screened were retrospective, and two of them had independent validation set. Four studies built deep learning models and seven built radiomics models. In addition, the gold standard of diagnosis in most studies is pathology. In these studies, 3 types of medical imaging were used, 3 with ultrasound, 6 with MRI, and only 2 with CT. The results of the Meta-analysis of the AUROC values are presented in the form of forest plots in <xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2</bold></xref>.</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Selected characteristics.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">First Author</th>
<th valign="middle" align="center">Country</th>
<th valign="middle" align="center">Year</th>
<th valign="middle" align="center">Study design</th>
<th valign="middle" align="center">Patients</th>
<th valign="middle" align="center">Mean or Median age (SD; range), years</th>
<th valign="middle" align="center">Imaging modality</th>
<th valign="middle" align="center">Type of malignancy</th>
<th valign="middle" align="center">AI model (Per-patient/per-node diagnostic output)</th>
<th valign="middle" align="center">Reference standard</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Christiansen-1</td>
<td valign="middle" align="left">Sweden</td>
<td valign="middle" align="center">2021</td>
<td valign="middle" align="left">Retrospective Single-center</td>
<td valign="middle" align="center">758</td>
<td valign="middle" align="center">_</td>
<td valign="middle" align="left">Ultrasound</td>
<td valign="middle" align="left">Ovarian tumors</td>
<td valign="middle" align="left">Deep learning (per-patient)</td>
<td valign="middle" align="left">Pathology</td>
</tr>
<tr>
<td valign="middle" align="left">Wang-2</td>
<td valign="middle" align="left">China</td>
<td valign="middle" align="center">2021</td>
<td valign="middle" align="left">Retrospective Single-center</td>
<td valign="middle" align="center">265</td>
<td valign="middle" align="center">_</td>
<td valign="middle" align="left">Ultrasound</td>
<td valign="middle" align="left">Ovarian tumors</td>
<td valign="middle" align="left">Deep learning (per-patient)</td>
<td valign="middle" align="left">Pathology</td>
</tr>
<tr>
<td valign="middle" align="left">Aramend&#xed;a-Vidaurreta-3</td>
<td valign="middle" align="left">Spain</td>
<td valign="middle" align="center">2015</td>
<td valign="middle" align="left">Retrospective Single-center</td>
<td valign="middle" align="center">145</td>
<td valign="middle" align="center">43(35-65)</td>
<td valign="middle" align="left">Ultrasound</td>
<td valign="middle" align="left">Ovarian tumors</td>
<td valign="middle" align="left">Deep learning (per-patient)</td>
<td valign="middle" align="left">_</td>
</tr>
<tr>
<td valign="middle" align="left">Wang-4</td>
<td valign="middle" align="left">USA</td>
<td valign="middle" align="center">2021</td>
<td valign="middle" align="left">Retrospective Single-center</td>
<td valign="middle" align="center">451</td>
<td valign="middle" align="center">47.8</td>
<td valign="middle" align="left">MRI</td>
<td valign="middle" align="left">Ovarian tumors</td>
<td valign="middle" align="left">Deep learning (per-patient)</td>
<td valign="middle" align="left">Pathology</td>
</tr>
<tr>
<td valign="middle" align="left">Li-5</td>
<td valign="middle" align="left">China</td>
<td valign="middle" align="center">2021</td>
<td valign="middle" align="left">Retrospective Multi-center</td>
<td valign="middle" align="center">134</td>
<td valign="middle" align="center">47.3</td>
<td valign="middle" align="left">MRI</td>
<td valign="middle" align="left">Ovarian tumors</td>
<td valign="middle" align="left">Radiomics (per-patient)</td>
<td valign="middle" align="left">Radiology</td>
</tr>
<tr>
<td valign="middle" align="left">Liu-6</td>
<td valign="middle" align="left">China</td>
<td valign="middle" align="center">2022</td>
<td valign="middle" align="left">Retrospective Single-center</td>
<td valign="middle" align="center">196</td>
<td valign="middle" align="center">45.85(13.5)</td>
<td valign="middle" align="left">MRI</td>
<td valign="middle" align="left">Ovarian tumors</td>
<td valign="middle" align="left">Radiomics (per-patient)</td>
<td valign="middle" align="left">Pathology</td>
</tr>
<tr>
<td valign="middle" align="left">Zhuang-7</td>
<td valign="middle" align="left">China</td>
<td valign="middle" align="center">2022</td>
<td valign="middle" align="left">Retrospective Single-center</td>
<td valign="middle" align="center">91</td>
<td valign="middle" align="center">37</td>
<td valign="middle" align="left">MRI</td>
<td valign="middle" align="left">Ovarian tumors</td>
<td valign="middle" align="left">Radiomics (per-patient)</td>
<td valign="middle" align="left">Pathology</td>
</tr>
<tr>
<td valign="middle" align="left">Zhang-8</td>
<td valign="middle" align="left">China</td>
<td valign="middle" align="center">2019</td>
<td valign="middle" align="left">Retrospective Multi-center</td>
<td valign="middle" align="center">286</td>
<td valign="middle" align="center">_</td>
<td valign="middle" align="left">MRI</td>
<td valign="middle" align="left">Ovarian tumors</td>
<td valign="middle" align="left">Radiomics (per-patient)</td>
<td valign="middle" align="left">Pathology</td>
</tr>
<tr>
<td valign="middle" align="left">Mimura-9</td>
<td valign="middle" align="left">Japan</td>
<td valign="middle" align="center">2016</td>
<td valign="middle" align="left">Retrospective Single-center</td>
<td valign="middle" align="center">42</td>
<td valign="middle" align="center">49.7</td>
<td valign="middle" align="left">MRI</td>
<td valign="middle" align="left">Ovarian tumors</td>
<td valign="middle" align="left">Radiomics (per-patient)</td>
<td valign="middle" align="left">Pathology</td>
</tr>
<tr>
<td valign="middle" align="left">Yu-10</td>
<td valign="middle" align="left">China</td>
<td valign="middle" align="center">2021</td>
<td valign="middle" align="left">Retrospective Single-center</td>
<td valign="middle" align="center">182</td>
<td valign="middle" align="center">47.1</td>
<td valign="middle" align="left">CT</td>
<td valign="middle" align="left">Ovarian tumors</td>
<td valign="middle" align="left">Radiomics (per-patient)</td>
<td valign="middle" align="left">Pathology</td>
</tr>
<tr>
<td valign="middle" align="left">Li-11</td>
<td valign="middle" align="left">China</td>
<td valign="middle" align="center">2022</td>
<td valign="middle" align="left">Retrospective Single-center</td>
<td valign="middle" align="center">140</td>
<td valign="middle" align="center">_</td>
<td valign="middle" align="left">CT</td>
<td valign="middle" align="left">Ovarian tumors</td>
<td valign="middle" align="left">Radiomics (per-patient)</td>
<td valign="middle" align="left">Pathology</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Forest plots of Meta-analysis.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-13-1133491-g002.tif"/>
</fig>
</sec>
<sec id="s3_3">
<title>Quality assessment</title>
<p>QUADAS-2 was used to assess the risk of bias in the studies, and the results are shown in <xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3</bold></xref>. For patient selection, all studies had a low risk of bias. However, the risk of bias was unclear for flow and timing in all 11 studies. For the index test, 9 studies (81.82%) had a high risk of bias and 2 studies (18.18%) had a low risk. 9 studies (81.82%) had a low and 2 (18.18%) an unclear risk of bias in the reference standard. <xref ref-type="supplementary-material" rid="ST1"><bold>Table S1</bold></xref> shows the individual evaluation of the risk of bias and applicability. For applicability concerns, the overall risk is low. A funnel plot (<xref ref-type="supplementary-material" rid="SF1"><bold>Figure S1</bold></xref>) and the Egger test were used to evaluate whether publication bias existed in the results of the meta-analysis. When publication bias exists, the trim-and-fill method is used to further analyze whether the results of the meta-analysis are stable and reliable (<xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4</bold></xref>). In addition, sensitivity analysis was used to evaluate whether the results of the meta-analysis were robust (<xref ref-type="supplementary-material" rid="SF2"><bold>Figure S2</bold></xref>).</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>The quality assessment of 11 included studies by QUADAS-2 tool.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-13-1133491-g003.tif"/>
</fig>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>The funnel plot treated by the shear and supplement method.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fonc-13-1133491-g004.tif"/>
</fig>
</sec>
<sec id="s3_4">
<title>Diagnostic accuracy</title>
<p>In these studies, the AUROC, sensitivity and specificity were used to assess the diagnostic performance of models. The categorized data extraction for each study report is shown in <xref ref-type="table" rid="T3"><bold>Table&#xa0;3</bold></xref>. As shown in <xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2</bold></xref>, AI models based on ultrasound had the best diagnostic performance, followed by MRI, and CT was the worst. The pooled AUROC of studies based on ultrasound, MRI and CT were 0.94 (95% CI 0.88-1.00), 0.82 (95% CI 0.71-0.93) and 0.82 (95% CI 0.78-0.86), respectively. In addition, the heterogeneity of all these studies was high, the values of I<sup>2</sup> reached 99.92%, 99.91% and 92.64% based on ultrasound, MRI and CT. The combined AUROC of all 11 included studies was 0.85 (95%CI 0.81-0.89) and I<sup>2</sup> was 99.88%.</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Data assessment.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">First Author</th>
<th valign="middle" align="center">Sensitivity, %</th>
<th valign="middle" align="center">Specificity, %</th>
<th valign="middle" align="center">Accuracy, %</th>
<th valign="middle" align="center">AUROC</th>
<th valign="middle" align="center">95%CI</th>
<th valign="middle" align="center">Imaging modality</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Christiansen-1</td>
<td valign="middle" align="center">0.96</td>
<td valign="middle" align="center">0.867</td>
<td valign="middle" align="center">91.3</td>
<td valign="middle" align="center">0.950</td>
<td valign="middle" align="center">0.897-0.987</td>
<td valign="middle" align="left">Ultrasound</td>
</tr>
<tr>
<td valign="middle" align="left">Wang-2</td>
<td valign="middle" align="center">&#x2013;</td>
<td valign="middle" align="center">0.9</td>
<td valign="middle" align="center">0.9</td>
<td valign="middle" align="center">0.963</td>
<td valign="middle" align="center">0.821-0.945</td>
<td valign="middle" align="left">Ultrasound</td>
</tr>
<tr>
<td valign="middle" align="left">Aramend&#xed;a-3</td>
<td valign="middle" align="center">0.985</td>
<td valign="middle" align="center">0.989</td>
<td valign="middle" align="center">0.9878</td>
<td valign="middle" align="center">0.997</td>
<td valign="middle" align="center">0.862-0.917</td>
<td valign="middle" align="left">Ultrasound</td>
</tr>
<tr>
<td valign="middle" align="left">Wang-4</td>
<td valign="middle" align="center">0.69</td>
<td valign="middle" align="center">0.81</td>
<td valign="middle" align="center">0.77</td>
<td valign="middle" align="center">0.83</td>
<td valign="middle" align="center">0.06-0.37</td>
<td valign="middle" align="left">MRI</td>
</tr>
<tr>
<td valign="middle" align="left">Li-5</td>
<td valign="middle" align="center">0.9</td>
<td valign="middle" align="center">0.8</td>
<td valign="middle" align="center">0.8</td>
<td valign="middle" align="center">0.87</td>
<td valign="middle" align="center">0.665-0.925</td>
<td valign="middle" align="left">MRI</td>
</tr>
<tr>
<td valign="middle" align="left">Liu-6</td>
<td valign="middle" align="center">0.936</td>
<td valign="middle" align="center">0.717</td>
<td valign="middle" align="center">0.828</td>
<td valign="middle" align="center">0.840</td>
<td valign="middle" align="center">0.83-0.96</td>
<td valign="middle" align="left">MRI</td>
</tr>
<tr>
<td valign="middle" align="left">Zhuang-7</td>
<td valign="middle" align="center">0.67</td>
<td valign="middle" align="center">0.82</td>
<td valign="middle" align="center">0.76</td>
<td valign="middle" align="center">0.86</td>
<td valign="middle" align="center">0.80-0.99</td>
<td valign="middle" align="left">MRI</td>
</tr>
<tr>
<td valign="middle" align="left">Zhang-8</td>
<td valign="middle" align="center">0.9441</td>
<td valign="middle" align="center">0.7885</td>
<td valign="middle" align="center">0.9026</td>
<td valign="middle" align="center">0.9746</td>
<td valign="middle" align="center">0.791-0.943</td>
<td valign="middle" align="left">MRI</td>
</tr>
<tr>
<td valign="middle" align="left">Mimura-9</td>
<td valign="middle" align="center">0.762</td>
<td valign="middle" align="center">0.813</td>
<td valign="middle" align="center">-</td>
<td valign="middle" align="center">0.795</td>
<td valign="middle" align="center">0.825-0.94</td>
<td valign="middle" align="left">MRI</td>
</tr>
<tr>
<td valign="middle" align="left">Yu-10</td>
<td valign="middle" align="center">0.8</td>
<td valign="middle" align="center">0.75</td>
<td valign="middle" align="center">0.78</td>
<td valign="middle" align="center">0.86</td>
<td valign="middle" align="center">0.716-0.884</td>
<td valign="middle" align="left">CT</td>
</tr>
<tr>
<td valign="middle" align="left">Li-11</td>
<td valign="middle" align="center">0.818</td>
<td valign="middle" align="center">0.789</td>
<td valign="middle" align="center">0.805</td>
<td valign="middle" align="center">0.87</td>
<td valign="middle" align="center">0.651-0.912</td>
<td valign="middle" align="left">CT</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>-, Not reported.</p>
</table-wrap-foot>
</table-wrap>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<title>Discussion</title>
<p>Medical imaging is the most effective way to assist doctors in clinical diagnosis and in analyzing a patient&#x2019;s condition. Imaging methods are important for patients with OC because different images help to determine the feasibility of the surgical approach and treatment (<xref ref-type="bibr" rid="B4">4</xref>). Our review is the first meta-analysis to evaluate the ability of artificial intelligence to identify benign and malignant ovarian cancer under different imaging modalities.</p>
<p>AI-based medical imaging breaks through the technical barriers of traditional methods that have been used in clinical practice, assisting physicians in lesion identification and diagnosis, efficacy assessment, and survival prognosis to improve the diagnostic efficiency of doctors (<xref ref-type="bibr" rid="B26">26</xref>). Diagnosis of ovarian tumors still requires surgical removal, and the surgeon&#x2019;s decision making is sometimes challenging in cases where preoperative examination findings are atypical. Therefore, if AI can calculate the probability of ovarian cancer based on the results of preoperative examination and predict the final diagnosis, the management level of ovarian cancer will be improved (<xref ref-type="bibr" rid="B27">27</xref>). Patients with benign ovarian tumors can avoid unnecessary surgery, and early diagnosis of ovarian cancer can improve the prognosis. In addition, for preoperative diagnosis, patients can receive a more informative probabilistic numerical interpretation (<xref ref-type="bibr" rid="B28">28</xref>). Preoperative diagnosis is more accurate and specific in the probability of ovarian tumor management decisions (<xref ref-type="bibr" rid="B29">29</xref>). AI extracts features from different types of images differently, and our study shows that the features extracted based on ultrasound images are better overall for the diagnosis of OC (<xref ref-type="bibr" rid="B27">27</xref>). Our results are consistent with a previous study which confirmed that ultrasound was an effective tool to characterize ovarian masses (<xref ref-type="bibr" rid="B30">30</xref>).</p>
<p>A total of 11 studies were included in our analysis, of which three were based on ultrasound, six were based on MRI and two were based on CT. However, only three of them built deep learning models. This may be due to the fact that deep learning techniques are relatively new and prone to bias. In a recent study, the authors selected the best-performing model to extract data for meta-analysis, but in our study, if multiple models were built in a study, we chose the model with the median AUROC value, which may better reflect the overall diagnostic performance of the models in a study. Finally, although most studies divided patients into training and test sets, most of them were monocentric, and external validation was particularly important in the study.</p>
<p>However, there are some limitations to our study. First, scanning parameters (including field intensity, contrast agent type, injection velocity, etc.) are not uniform, and the analysis software is different. Second, studies that only include Chinese and English literature may have some linguistic bias; in addition, the vast majority of the studies&#x2019; first authors were from China, as were most of the cases, so there may be some bias. We should also critically consider some methodological issues. Modern information processing techniques to develop radiology report databases can improve report retrieval and help radiologists make diagnoses (<xref ref-type="bibr" rid="B31">31</xref>). We need to advocate for Internet networks to identify patient data from all over the world, and large-scale training of AI based on different patient demographics, geographic regions, diseases, and so on. In addition, we highlight the need for a more diverse database of images for rare cancers, including OC.</p>
</sec>
<sec id="s5" sec-type="conclusion">
<title>Conclusion</title>
<p>AI can play an adjunctive role in identifying benign and malignant ovarian tumors, and the models based on ultrasound have the best diagnostic ability. However, due to the limitations of the number and quality of included studies, the above conclusions need to be viewed with caution, and more standardized and prospective studies need to be conducted to confirm them.</p>
<p>In conclusion, AI algorithms show good performance in diagnosing OC through medical imaging. Stricter reporting standards that address specific challenges in AI research could improve future research.</p>
</sec>
<sec id="s6" sec-type="author-contributions">
<title>Author contributions</title>
<p>All authors had full access to all the data in the study and take responsibility for the integrity of the data and the accuracy of the data analysis. QX designed the study. ND, HL and ZL acquired the study data. CY and HW analyzed and interpreted the data. ML wrote the first draft of the manuscript. All authors revised the manuscript and approved it for publication.</p>
</sec>
</body>
<back>
<sec id="s7" sec-type="funding-information">
<title>Funding</title>
<p>Sichuan Medical Association project (Grant No. 2019HR65).</p>
</sec>
<sec id="s8" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s9" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s10" sec-type="supplementary-material">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fonc.2023.1133491/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fonc.2023.1133491/full#supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="Table_1.docx" id="ST1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document"/>
<supplementary-material xlink:href="Image_1.tif" id="SF1" mimetype="image/tiff"/>
<supplementary-material xlink:href="Image_2.tif" id="SF2" mimetype="image/tiff"/>
</sec>
<fn-group>
<title>Abbreviations</title>
<fn fn-type="abbr">
<p>CT, Computed tomography; MRI, Magnetic resonance imaging; AI, Artificial intelligence; ML, Machine learning; QUADAS-2, Quality Assessment of Diagnostic Accuracy Studies tool 2; AUROC, Area under the receiver operating characteristic curve.</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Stewart</surname> <given-names>C</given-names>
</name>
<name>
<surname>Ralyea</surname> <given-names>C</given-names>
</name>
<name>
<surname>Lockwood</surname> <given-names>S</given-names>
</name>
</person-group>. <article-title>Ovarian cancer: An integrated review</article-title>. <source>Semin Oncol Nurs</source> (<year>2019</year>) <volume>35</volume>(<issue>2</issue>):<page-range>151&#x2013;6</page-range>. doi: <pub-id pub-id-type="doi">10.1016/j.soncn.2019.02.001</pub-id>
</citation>
</ref>
<ref id="B2">
<label>2</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Eisenhauer</surname> <given-names>EA</given-names>
</name>
</person-group>. <article-title>Real-world evidence in the treatment of ovarian cancer</article-title>. <source>Ann Oncol</source> (<year>2017</year>) <volume>28</volume>(<supplement>suppl_8</supplement>):<page-range>viii61&#x2013;65</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1093/annonc/mdx443</pub-id>
</citation>
</ref>
<ref id="B3">
<label>3</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Christiansen</surname> <given-names>F</given-names>
</name>
<name>
<surname>Epstein</surname> <given-names>EL</given-names>
</name>
<name>
<surname>Smedberg</surname> <given-names>E</given-names>
</name>
<name>
<surname>&#xc5;kerlund</surname> <given-names>M</given-names>
</name>
<name>
<surname>Smith</surname> <given-names>K</given-names>
</name>
<name>
<surname>Epstein</surname> <given-names>E</given-names>
</name>
</person-group>. <article-title>Ultrasound image analysis using deep neural networks for discriminating between benign and malignant ovarian tumors: comparison with expert subjective assessment</article-title>. <source>Ultrasound Obstet Gynecol</source> (<year>2021</year>) <volume>57</volume>(<issue>1</issue>):<page-range>155&#x2013;63</page-range>. doi: <pub-id pub-id-type="doi">10.1002/uog.23530</pub-id>
</citation>
</ref>
<ref id="B4">
<label>4</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mironov</surname> <given-names>S</given-names>
</name>
<name>
<surname>Akin</surname> <given-names>O</given-names>
</name>
<name>
<surname>Pandit-Taskar</surname> <given-names>N</given-names>
</name>
<name>
<surname>Hann</surname> <given-names>LE</given-names>
</name>
</person-group>. <article-title>Ovarian cancer</article-title>. <source>Radiol Clin North Am</source> (<year>2007</year>) <volume>45</volume>(<issue>1</issue>):<page-range>149&#x2013;66</page-range>. doi: <pub-id pub-id-type="doi">10.1016/j.rcl.2006.10.012</pub-id>
</citation>
</ref>
<ref id="B5">
<label>5</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shetty</surname> <given-names>M</given-names>
</name>
</person-group>. <article-title>Imaging and Differential Diagnosis of Ovarian Cancer</article-title>. <source>Semin Ultrasound CT MR</source> (<year>2019</year>) <volume>40</volume>(<issue>4</issue>):<page-range>302&#x2013;18</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1053/j.sult.2019.04.002</pub-id>
</citation>
</ref>
<ref id="B6">
<label>6</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hosny</surname> <given-names>A</given-names>
</name>
<name>
<surname>Parmar</surname> <given-names>C</given-names>
</name>
<name>
<surname>Quackenbush</surname> <given-names>J</given-names>
</name>
<name>
<surname>Schwartz</surname> <given-names>LH</given-names>
</name>
<name>
<surname>Aerts</surname> <given-names>HJWL</given-names>
</name>
</person-group>. <article-title>Artificial intelligence in radiology</article-title>. <source>Nat Rev Cancer</source> (<year>2018</year>) <volume>18</volume>(<issue>8</issue>):<page-range>500&#x2013;10</page-range>. doi: <pub-id pub-id-type="doi">10.1038/s41568-018-0016-5</pub-id>
</citation>
</ref>
<ref id="B7">
<label>7</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sogani</surname> <given-names>J</given-names>
</name>
<name>
<surname>Allen</surname> <given-names>B</given-names>
<suffix>Jr</suffix>
</name>
<name>
<surname>Dreyer</surname> <given-names>K</given-names>
</name>
<name>
<surname>McGinty</surname> <given-names>G</given-names>
</name>
</person-group>. <article-title>Artificial intelligence in radiology: the ecosystem essential to improving patient care</article-title>. <source>Clin Imaging</source> (<year>2020</year>) <volume>59</volume>(<issue>1</issue>):<page-range>A3&#x2013;6</page-range>. doi: <pub-id pub-id-type="doi">10.1016/j.clinimag.2019.08.001</pub-id>
</citation>
</ref>
<ref id="B8">
<label>8</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Campbell</surname> <given-names>S</given-names>
</name>
<name>
<surname>Gentry-Maharaj</surname> <given-names>A</given-names>
</name>
</person-group>. <article-title>The role of transvaginal ultrasound in screening for ovarian cancer</article-title>. <source>Climacteric</source> (<year>2018</year>) <volume>21</volume>(<issue>3</issue>):<page-range>221&#x2013;6</page-range>. doi: <pub-id pub-id-type="doi">10.1080/13697137.2018.1433656</pub-id>
</citation>
</ref>
<ref id="B9">
<label>9</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rizzo</surname> <given-names>S</given-names>
</name>
<name>
<surname>Del Grande</surname> <given-names>M</given-names>
</name>
<name>
<surname>Manganaro</surname> <given-names>L</given-names>
</name>
<name>
<surname>Papadia</surname> <given-names>A</given-names>
</name>
<name>
<surname>Del Grande</surname> <given-names>F</given-names>
</name>
</person-group>. <article-title>Imaging before cytoreductive surgery in advanced ovarian cancer patients</article-title>. <source>Int J Gynecol Cancer</source> (<year>2020</year>) <volume>30</volume>(<issue>1</issue>):<page-range>133&#x2013;8</page-range>. doi: <pub-id pub-id-type="doi">10.1136/ijgc-2019-000819</pub-id>
</citation>
</ref>
<ref id="B10">
<label>10</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kumar</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Koul</surname> <given-names>A</given-names>
</name>
<name>
<surname>Singla</surname> <given-names>R</given-names>
</name>
<name>
<surname>Ijaz</surname> <given-names>MF</given-names>
</name>
</person-group>. <article-title>Artificial intelligence in disease diagnosis: a systematic literature review, synthesizing framework and future research agenda [published online ahead of print, 2022 Jan 13]</article-title>. <source>J Ambient Intell Humaniz Comput</source> (<year>2022</year>) <volume>1-28</volume>. doi: <pub-id pub-id-type="doi">10.1007/s12652-021-03612-z</pub-id>
</citation>
</ref>
<ref id="B11">
<label>11</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bedrikovetski</surname> <given-names>S</given-names>
</name>
<name>
<surname>Dudi-Venkata</surname> <given-names>NN</given-names>
</name>
<name>
<surname>Maicas</surname> <given-names>G</given-names>
</name>
<name>
<surname>Kroon</surname> <given-names>HM</given-names>
</name>
<name>
<surname>Seow</surname> <given-names>W</given-names>
</name>
<name>
<surname>Carneiro</surname> <given-names>G</given-names>
</name>
<etal/>
</person-group>. <article-title>Artificial intelligence for the diagnosis of lymph node metastases in patients with abdominopelvic malignancy: A systematic review and meta-analysis</article-title>. <source>Artif Intell Med</source> (<year>2021</year>) <volume>113</volume>:<fpage>102022</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.artmed.2021.102022</pub-id>
</citation>
</ref>
<ref id="B12">
<label>12</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Moher</surname> <given-names>D</given-names>
</name>
<name>
<surname>Liberati</surname> <given-names>A</given-names>
</name>
<name>
<surname>Tetzlaff</surname> <given-names>J</given-names>
</name>
<name>
<surname>Altman</surname> <given-names>DG</given-names>
</name>
<collab>PRISMA Group</collab>
</person-group>. <article-title>Preferred reporting items for systematic reviews and meta-analyses: the PRISMA statement</article-title>. <source>PloS Med</source> (<year>2009</year>) <volume>6</volume>(<issue>7</issue>):<elocation-id>e1000097</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1371/journal.pmed.1000097</pub-id>
</citation>
</ref>
<ref id="B13">
<label>13</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Whiting</surname> <given-names>PF</given-names>
</name>
<name>
<surname>Rutjes</surname> <given-names>AW</given-names>
</name>
<name>
<surname>Westwood</surname> <given-names>ME</given-names>
</name>
<name>
<surname>Mallett</surname> <given-names>S</given-names>
</name>
<name>
<surname>Deeks</surname> <given-names>JJ</given-names>
</name>
<name>
<surname>Reitsma</surname> <given-names>JB</given-names>
</name>
<etal/>
</person-group>. <article-title>QUADAS-2: a revised tool for the quality assessment of diagnostic accuracy studies</article-title>. <source>Ann Intern Med</source> (<year>2011</year>) <volume>155</volume>:<page-range>529&#x2013;36</page-range>. doi: <pub-id pub-id-type="doi">10.7326/0003-4819-155-8-201110180-00009</pub-id>
</citation>
</ref>
<ref id="B14">
<label>14</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Higgins</surname> <given-names>JP</given-names>
</name>
<name>
<surname>Thompson</surname> <given-names>SG</given-names>
</name>
<name>
<surname>Deeks</surname> <given-names>JJ</given-names>
</name>
<name>
<surname>Altman</surname> <given-names>DG</given-names>
</name>
</person-group>. <article-title>Measuring inconsistency in meta-analyses</article-title>. <source>BMJ</source> (<year>2003</year>) <volume>327</volume>(<issue>7414</issue>):<page-range>557&#x2013;60</page-range>. doi: <pub-id pub-id-type="doi">10.1136/bmj.327.7414.557</pub-id>
</citation>
</ref>
<ref id="B15">
<label>15</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Egger</surname> <given-names>M</given-names>
</name>
<name>
<surname>Smith</surname> <given-names>GD</given-names>
</name>
<name>
<surname>Schneider</surname> <given-names>M</given-names>
</name>
<name>
<surname>Minder</surname> <given-names>C</given-names>
</name>
</person-group>. <article-title>Bias in meta-analysis detected by a simple, graphical test</article-title>. <source>BMJ</source> (<year>1997</year>) <volume>315</volume>:<page-range>629&#x2013;34</page-range>. doi: <pub-id pub-id-type="doi">10.1136/bmj.315.7109.629</pub-id>
</citation>
</ref>
<ref id="B16">
<label>16</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>H</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>C</given-names>
</name>
<name>
<surname>Zhao</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>C</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>X</given-names>
</name>
<name>
<surname>Li</surname> <given-names>H</given-names>
</name>
<etal/>
</person-group>. <article-title>Application of deep convolutional neural networks for discriminating benign, borderline, and malignant serous ovarian tumors from ultrasound images</article-title>. <source>Front Oncol</source> (<year>2021</year>) <volume>11</volume>:<elocation-id>770683</elocation-id>. doi: <pub-id pub-id-type="doi">10.3389/fonc.2021.770683</pub-id>
</citation>
</ref>
<ref id="B17">
<label>17</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aramend&#xed;a-Vidaurreta</surname> <given-names>V</given-names>
</name>
<name>
<surname>Cabeza</surname> <given-names>R</given-names>
</name>
<name>
<surname>Villanueva</surname> <given-names>A</given-names>
</name>
<name>
<surname>Navallas</surname> <given-names>J</given-names>
</name>
<name>
<surname>Alc&#xe1;zar</surname> <given-names>JL</given-names>
</name>
</person-group>. <article-title>Ultrasound image discrimination between benign and malignant adnexal masses based on a neural network approach</article-title>. <source>Ultrasound Med Biol</source> (<year>2016</year>) <volume>42</volume>(<issue>3</issue>):<page-range>742&#x2013;52</page-range>. doi: <pub-id pub-id-type="doi">10.1016/j.ultrasmedbio.2015.11.014</pub-id>
</citation>
</ref>
<ref id="B18">
<label>18</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>R</given-names>
</name>
<name>
<surname>Cai</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>IK</given-names>
</name>
<name>
<surname>Hu</surname> <given-names>R</given-names>
</name>
<name>
<surname>Purkayastha</surname> <given-names>S</given-names>
</name>
<name>
<surname>Pan</surname> <given-names>I</given-names>
</name>
<etal/>
</person-group>. <article-title>Evaluation of a convolutional neural network for ovarian tumor differentiation based on magnetic resonance imaging</article-title>. <source>Eur Radiol</source> (<year>2021</year>) <volume>31</volume>(<issue>7</issue>):<page-range>4960&#x2013;71</page-range>. doi: <pub-id pub-id-type="doi">10.1007/s00330-020-07266-x</pub-id>
</citation>
</ref>
<ref id="B19">
<label>19</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname> <given-names>S</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>J</given-names>
</name>
<name>
<surname>Xiong</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Pang</surname> <given-names>P</given-names>
</name>
<name>
<surname>Lei</surname> <given-names>P</given-names>
</name>
<name>
<surname>Zou</surname> <given-names>H</given-names>
</name>
<etal/>
</person-group>. <article-title>A radiomics approach for automated diagnosis of ovarian neoplasm malignancy in computed tomography</article-title>. <source>Sci Rep</source> (<year>2021</year>) <volume>11</volume>(<issue>1</issue>):<fpage>8730</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-021-87775-x</pub-id>
</citation>
</ref>
<ref id="B20">
<label>20</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname> <given-names>X</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>T</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>G</given-names>
</name>
<name>
<surname>Hua</surname> <given-names>K</given-names>
</name>
<name>
<surname>Jiang</surname> <given-names>H</given-names>
</name>
<name>
<surname>Duan</surname> <given-names>S</given-names>
</name>
<etal/>
</person-group>. <article-title>Two-dimensional and three-dimensional T2 weighted imaging-based radiomic signatures for the preoperative discrimination of ovarian borderline tumors and malignant tumors</article-title>. <source>J Ovarian Res</source> (<year>2022</year>) <volume>15</volume>(<issue>1</issue>):<fpage>22</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s13048-022-00943-z</pub-id>
</citation>
</ref>
<ref id="B21">
<label>21</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhuang</surname> <given-names>J</given-names>
</name>
<name>
<surname>Cheng</surname> <given-names>M-y</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>L-j</given-names>
</name>
<name>
<surname>Zhao</surname> <given-names>X</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>R</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>X-a</given-names>
</name>
</person-group>. <article-title>Differential analysis of benign and malignant ovarian tumors based on T2-Dixon hydrographic imaging model</article-title>. <source>J Med Forum</source> (<year>2002</year>) <volume>43</volume>(<issue>09</issue>):<fpage>15</fpage>&#x2013;<lpage>20</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.13437/j.cnki.jcr.2016.03.027</pub-id>
</citation>
</ref>
<ref id="B22">
<label>22</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname> <given-names>H</given-names>
</name>
<name>
<surname>Mao</surname> <given-names>YF</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>XJ</given-names>
</name>
<name>
<surname>Wu</surname> <given-names>G</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>X</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>P</given-names>
</name>
<etal/>
</person-group>. <article-title>Magnetic resonance imaging radiomics in categorizing ovarian masses and predicting clinical outcome: a preliminary study</article-title>. <source>Eur Radiol</source> (<year>2019</year>) <volume>29</volume>(<issue>7</issue>):<page-range>3358&#x2013;71</page-range>. doi: <pub-id pub-id-type="doi">10.1007/s00330-019-06124-9</pub-id>
</citation>
</ref>
<ref id="B23">
<label>23</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mimura</surname> <given-names>R</given-names>
</name>
<name>
<surname>Kato</surname> <given-names>F</given-names>
</name>
<name>
<surname>Tha</surname> <given-names>KK</given-names>
</name>
<name>
<surname>Kudo</surname> <given-names>K</given-names>
</name>
<name>
<surname>Konno</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Oyama-Manabe</surname>
</name>
<etal/>
</person-group>. <article-title>Comparison between borderline ovarian tumors and carcinomas using semi-automated histogram analysis of diffusion-weighted imaging: focusing on solid components</article-title>. <source>Japanese J Radiol</source> (<year>2016</year>) <volume>34</volume>(<issue>3</issue>):<page-range>229&#x2013;37</page-range>. doi: <pub-id pub-id-type="doi">10.1007/s11604-016-0518-6</pub-id>
</citation>
</ref>
<ref id="B24">
<label>24</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yu</surname> <given-names>XP</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>L</given-names>
</name>
<name>
<surname>Yu</surname> <given-names>HY</given-names>
</name>
<name>
<surname>Zou</surname> <given-names>YW</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>C</given-names>
</name>
<name>
<surname>Jiao</surname> <given-names>JW</given-names>
</name>
<etal/>
</person-group>. <article-title>MDCT-based radiomics features for the differentiation of serous borderline ovarian tumors and serous malignant ovarian tumors</article-title>. <source>Cancer Manag Res</source> (<year>2021</year>) <volume>13</volume>:<page-range>329&#x2013;36</page-range>. doi: <pub-id pub-id-type="doi">10.2147/CMAR.S284220</pub-id>
</citation>
</ref>
<ref id="B25">
<label>25</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname> <given-names>C</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>H</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Fang</surname> <given-names>M</given-names>
</name>
<name>
<surname>Zhu</surname> <given-names>C</given-names>
</name>
<name>
<surname>Gao</surname> <given-names>Y</given-names>
</name>
<etal/>
</person-group>. <article-title>A nomogram combining MRI multisequence radiomics and clinical factors for predicting recurrence of high-grade serous ovarian carcinoma</article-title>. <source>J Oncol</source> (<year>2022</year>) <volume>2022</volume>:<fpage>1716268</fpage>. doi: <pub-id pub-id-type="doi">10.1155/2022/1716268</pub-id>
</citation>
</ref>
<ref id="B26">
<label>26</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gore</surname> <given-names>JC</given-names>
</name>
</person-group>. <article-title>Artificial intelligence in medical imaging</article-title>. <source>Magn Reson Imaging</source> (<year>2020</year>) <volume>68</volume>:<page-range>A1&#x2013;4</page-range>. doi: <pub-id pub-id-type="doi">10.1016/j.mri.2019.12.006</pub-id>
</citation>
</ref>
<ref id="B27">
<label>27</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Akazawa</surname> <given-names>M</given-names>
</name>
<name>
<surname>Hashimoto</surname> <given-names>K</given-names>
</name>
</person-group>. <article-title>Artificial intelligence in ovarian cancer diagnosis</article-title>. <source>Anticancer Res</source> (<year>2020</year>) <volume>40</volume>(<issue>8</issue>):<page-range>4795&#x2013;800</page-range>. doi: <pub-id pub-id-type="doi">10.21873/anticanres.14482</pub-id>
</citation>
</ref>
<ref id="B28">
<label>28</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname> <given-names>L</given-names>
</name>
<name>
<surname>Huang</surname> <given-names>J</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>L</given-names>
</name>
</person-group>. <article-title>Improved deep learning network based in combination with cost-sensitive learning for early detection of ovarian cancer in color ultrasound detecting system</article-title>. <source>J Med Syst</source> (<year>2019</year>) <volume>43</volume>(<issue>8</issue>):<fpage>251</fpage>. doi: <pub-id pub-id-type="doi">10.1007/s10916-019-1356-8</pub-id>
</citation>
</ref>
<ref id="B29">
<label>29</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sherbet</surname> <given-names>GV</given-names>
</name>
<name>
<surname>Woo</surname> <given-names>WL</given-names>
</name>
<name>
<surname>Dlay</surname> <given-names>S</given-names>
</name>
</person-group>. <article-title>Application of artificial intelligence-based technology in cancer management: A commentary on the deployment of artificial neural networks</article-title>. <source>Anticancer Res</source> (<year>2018</year>) <volume>38</volume>(<issue>12</issue>):<page-range>6607&#x2013;13</page-range>. doi: <pub-id pub-id-type="doi">10.21873/anticanres.13027</pub-id>
</citation>
</ref>
<ref id="B30">
<label>30</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Elias</surname> <given-names>KM</given-names>
</name>
<name>
<surname>Guo</surname> <given-names>J</given-names>
</name>
<name>
<surname>Bast</surname> <given-names>RC</given-names>
<suffix>Jr</suffix>
</name>
</person-group>. <article-title>Early detection of ovarian cancer</article-title>. <source>Hematol Oncol Clin North Am</source> (<year>2018</year>) <volume>32</volume>(<issue>6</issue>):<page-range>903&#x2013;14</page-range>. doi: <pub-id pub-id-type="doi">10.1016/j.hoc.2018.07.003</pub-id>
</citation>
</ref>
<ref id="B31">
<label>31</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xu</surname> <given-names>HL</given-names>
</name>
<name>
<surname>Gong</surname> <given-names>TT</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>FH</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>H</given-names>
</name>
<name>
<surname>Xiao</surname> <given-names>Q</given-names>
</name>
<name>
<surname>Hou</surname> <given-names>Y</given-names>
</name>
<etal/>
</person-group>. <article-title>Artificial intelligence performance in image-based ovarian cancer identification: A systematic review and meta-analysis</article-title>. <source>EClinicalMedicine</source> (<year>2022</year>) <volume>53</volume>:<fpage>101662</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.eclinm.2022.101662</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>
