<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Archiving and Interchange DTD v2.3 20070202//EN" "archivearticle.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="systematic-review" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Endocrinol.</journal-id>
<journal-title>Frontiers in Endocrinology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Endocrinol.</abbrev-journal-title>
<issn pub-type="epub">1664-2392</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fendo.2025.1485311</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Endocrinology</subject>
<subj-group>
<subject>Systematic Review</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Deep learning-based optical coherence tomography and retinal images for detection of diabetic retinopathy: a systematic and meta analysis</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Bi</surname>
<given-names>Zheng</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2421901"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Li</surname>
<given-names>Jinju</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2640096"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Liu</surname>
<given-names>Qiongyi</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Fang</surname>
<given-names>Zhaohui</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Department of Endocrinology, The First Affiliated Hospital of Anhui University of Traditional Chinese Medicine</institution>, <addr-line>Hefei, Anhui</addr-line>, <country>China</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>First Clinical Medical College, Anhui University of Traditional Chinese Medicine</institution>, <addr-line>Hefei, Anhui</addr-line>, <country>China</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>Xin&#x2019;an Medical and Chinese Medicine Modernization Research Institute, Hefei Comprehensive National Science Center</institution>, <addr-line>Hefei, Anhui</addr-line>, <country>China</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>Edited by: Kaveh Fadakar, Northwestern University, United States</p>
</fn>
<fn fn-type="edited-by">
<p>Reviewed by: Mengxi Shen, University of Miami Health System, United States</p>
<p>Fatma Taher, Zayed University, United Arab Emirates</p>
</fn>
<fn fn-type="corresp" id="fn001">
<p>*Correspondence: Zhaohui Fang, <email xlink:href="mailto:fangzhaohui9097@163.com">fangzhaohui9097@163.com</email>
</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>18</day>
<month>03</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>16</volume>
<elocation-id>1485311</elocation-id>
<history>
<date date-type="received">
<day>23</day>
<month>08</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>28</day>
<month>02</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2025 Bi, Li, Liu and Fang</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Bi, Li, Liu and Fang</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<sec>
<title>Objective</title>
<p>To systematically review and meta-analyze the effectiveness of deep learning algorithms applied to optical coherence tomography (OCT) and retinal images for the detection of diabetic retinopathy (DR).</p>
</sec>
<sec>
<title>Methods</title>
<p>We conducted a comprehensive literature search in multiple databases including PubMed, Cochrane library, Web of Science, Embase and IEEE Xplore up to July 2024. Studies that utilized deep learning techniques for the detection of DR using OCT and retinal images were included. Data extraction and quality assessment were performed independently by two reviewers. Meta-analysis was conducted to determine pooled sensitivity, specificity, and diagnostic odds ratios.</p>
</sec>
<sec>
<title>Results</title>
<p>A total of 47 studies were included in the systematic review, of which 10 were meta-analyzed, encompassing a total of 188268 retinal images and OCT scans. The meta-analysis revealed a pooled sensitivity of 1.88 (95% CI: 1.45-2.44) and a pooled specificity of 1.33 (95% CI: 0.97-1.84) for the detection of DR using deep learning models. All outcomes of deep learning-based optical coherence tomography had ORs &#x2265;0.785, indicating that all included studies with artificial intelligence assistance produced good boosting results.</p>
</sec>
<sec>
<title>Conclusion</title>
<p>Deep learning-based approaches show high accuracy in detecting diabetic retinopathy from OCT and retinal images, supporting their potential as reliable tools in clinical settings. Future research should focus on standardizing datasets, improving model interpretability, and validating performance across diverse populations.</p>
</sec>
<sec>
<title>Systematic Review Registration</title>
<p>
<uri xlink:href="https://www.crd.york.ac.uk/PROSPERO/">https://www.crd.york.ac.uk/PROSPERO/</uri>, identifier CRD42024575847.</p>
</sec>
</abstract>
<kwd-group>
<kwd>meta analysis</kwd>
<kwd>deep learning</kwd>
<kwd>diabetic retinopathy</kwd>
<kwd>image detection</kwd>
<kwd>optical coherence tomography</kwd>
</kwd-group>
<counts>
<fig-count count="5"/>
<table-count count="3"/>
<equation-count count="0"/>
<ref-count count="45"/>
<page-count count="11"/>
<word-count count="4760"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-in-acceptance</meta-name>
<meta-value>Clinical Diabetes</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<title>Introduction</title>
<p>Diabetic retinopathy (DR) is one of the most common microvascular complications of diabetes and a leading cause of blindness in adults worldwide (<xref ref-type="bibr" rid="B1">1</xref>). As the prevalence of diabetes continues to rise globally, the incidence of DR is also increasing significantly. Retinal vascular abnormalities, which are hallmarks of DR, gradually lead to a decline in patients&#x2019; vision and, in severe cases, can cause blindness (<xref ref-type="bibr" rid="B1">1</xref>, <xref ref-type="bibr" rid="B2">2</xref>). Given the current medical capabilities, the disease cannot be completely cured; treatment focuses on maintaining the patient&#x2019;s existing level of vision. If DR can be diagnosed and treated early, in most cases, patients&#x2019; vision can be preserved. Early detection and timely treatment of DR are crucial for preventing vision loss (<xref ref-type="bibr" rid="B3">3</xref>).</p>
<p>Traditionally, the detection of DR relies on ophthalmologists&#x2019; manual evaluation of retinal images (<xref ref-type="bibr" rid="B4">4</xref>). However, this method is time-consuming, labor-intensive, and subject to variability due to the experience and subjective judgment of the evaluators, leading to inconsistent detection outcomes (<xref ref-type="bibr" rid="B5">5</xref>). Currently, most ophthalmologists still use traditional methods to diagnose diabetic retinopathy (DR) by analyzing the presence and types of abnormalities in retinal images. Microaneurysms (MIA), hemorrhages (HEM), soft exudates (SOX), and hard exudates (HEX) are the four most common types of lesions (<xref ref-type="bibr" rid="B3">3</xref>&#x2013;<xref ref-type="bibr" rid="B5">5</xref>).</p>
<p>Manual detection of diabetic retinopathy (DR) images presents several issues. First, interpreting DR images requires trained ophthalmologists, but in underdeveloped countries, there is a severe shortage of ophthalmologists, leading to many patients being unable to receive timely screening and treatment (<xref ref-type="bibr" rid="B6">6</xref>). Additionally, the cost of DR examinations is high, making it unaffordable for many patients and causing them to miss the opportunity for early intervention. These issues contribute to the high prevalence and risk of blindness associated with DR (<xref ref-type="bibr" rid="B7">7</xref>). Since timely detection is crucial in preventing vision loss, scientists and engineers have been working to design automated methods to achieve accurate and rapid diagnosis and treatment. Automated methods not only address the shortage of human resources but also significantly reduce the cost of screening, benefiting more patients (<xref ref-type="bibr" rid="B8">8</xref>). In recent years, with the rapid development of machine learning (ML) and artificial intelligence (AI) technologies, ML models trained on a large number of fundus images have achieved high accuracy in automated DR classification (<xref ref-type="bibr" rid="B9">9</xref>). These models can quickly and efficiently analyze large volumes of images, allowing for a substantial number of screenings to be completed in a short time. To further improve detection performance, substantial effort has been invested in developing automated methods that are both efficient and cost-effective. These methods not only consider the accuracy of detection but also emphasize ease of use and cost control, making them more suitable for implementation in resource-limited settings (<xref ref-type="bibr" rid="B10">10</xref>).</p>
<p>Recently, advancements in optical coherence tomography (OCT) and retinal imaging technology have provided high-resolution image data for the early detection of DR (<xref ref-type="bibr" rid="B9">9</xref>, <xref ref-type="bibr" rid="B10">10</xref>). OCT technology can generate detailed three-dimensional images of the retina, revealing subtle lesion features, allowing for detection of abnormalities at an early stage of the disease. These high-resolution image data greatly enhance the performance of automated detection systems, enabling more accurate identification and classification of DR lesions, thereby providing timely and effective treatment recommendations for patients (<xref ref-type="bibr" rid="B11">11</xref>). These images can capture minute changes in the retina, enabling more accurate detection of DR. With the rapid development of deep learning technology, significant breakthroughs have been achieved in the field of computer vision. Deep learning algorithms, particularly convolutional neural networks (CNNs), have demonstrated exceptional performance in image recognition and classification tasks and have been widely applied in medical image analysis. Kazakh-British et&#xa0;al. (<xref ref-type="bibr" rid="B11">11</xref>) conducted experimental research using relevant processing pipelines to extract arteries from fundus images and then trained CNN models to identify lesions (<xref ref-type="bibr" rid="B9">9</xref>). Alexandr et&#xa0;al. (<xref ref-type="bibr" rid="B12">12</xref>) compared two widely used classical designs (DenseNet and ResNet) with a new enhanced structure (EfficientNet) in their other work. Previous studies have shown that deep learning-based models can automatically analyze OCT and retinal images, accurately identifying and classifying different stages of DR (<xref ref-type="bibr" rid="B13">13</xref>).</p>
<p>Despite numerous studies exploring the application of deep learning in DR detection, their results and conclusions often vary, and a unified perspective has yet to emerge. Therefore, there is a need for a systematic review and meta-analysis to comprehensively evaluate the effectiveness of deep learning-based OCT and retinal image analysis for DR detection, clarifying its clinical value and future directions.</p>
<p>This study aims to systematically review and meta-analyze existing research to assess the accuracy and reliability of deep learning models in detecting DR from OCT and retinal images and to identify key factors influencing detection performance. Through this research, we hope to provide scientific evidence for clinical practice and promote the application and popularization of deep learning technology in ophthalmic diagnostics.</p>
</sec>
<sec id="s2" sec-type="materials|methods">
<title>Materials and methods</title>
<p>Reporting of this review and meta-analysis followed the PRISMA checklist. The study protocol was registered after the initial screening stage. The design of the inclusion and exclusion criteria of this study was based on the five main principles of the Participant-Intervention-Comparator-Outcomes-Study (PICOS) design search principle (<xref ref-type="bibr" rid="B14">14</xref>). Our PICO question was as follows: In deep learning applications developed based on retinal images for early screening of diabetic retinopathy (Participants), how does DL (Intervention) compare with traditional landmarks by a single expert or with scripted eye care provider referral and education (Control) in terms of accuracy (Outcome)? The systematic evaluation program is registered on the International Prospective Systems Evaluation website (PROSPERO-CRD42024575847).</p>
<sec id="s2_1">
<title>Inclusion criteria</title>
<p>The included patients all had diabetic retinopathy, regardless of age, sex, or race. The control group received conventional basic treatment (e.g., scripted eye care provider referral and education). The treatment group was treated with deep learning-based optical coherence tomography and retinal images (color fundus photography). The primary outcome indicators were as follows: diabetic eye exam completion rate, the proportion of participants who completed follow-through with an eye care provider, and DR classification accuracy. The types of included literature were randomized controlled trials (RCTs) and observational studies, with no restrictions on language, blinding, or allocation concealment requirements. Any study approved by the local institution was included in the scope of this study and registered in the international database.</p>
</sec>
<sec id="s2_2">
<title>Exclusion criteria</title>
<p>Self-control studies, case reports, literature reviews, duplicate publications, experience summaries, animal experiment research, studies with incomplete data, studies involving patients with other diseases, studies lacking clear diagnostic or efficacy evaluation standards, and studies combining other therapies different from the control group were excluded.</p>
</sec>
<sec id="s2_3">
<title>Information sources</title>
<p>We systematically screened five electronic databases (Cochrane Library, PubMed, Embase, IEEE Xplore, Web of Science) for studies published from January 2017 to July 2024. Search terms included Coherence Tomography, Optical, Optical Coherence Tomography, OCT Tomography, Tomography, OCT (Spectral Domain OCT (SD-OCT). This allowed for high-resolution 3D imaging of the retinal layers and provided detailed information for the deep learning model analysis), Diabetic Retinopathies, Retinopathies, Diabetic, Retinopathy, Diabetic, Deep learning-based, Deep learning. A two-pronged search strategy, combining the technique of interest (AI, CNN, DL, etc.) and the diagnostic target, was applied. The best effort was made to ensure the comprehensiveness of the preliminary search work so as not to lose valuable research data. According to the search modes of different databases, keywords could be combined with free words for a comprehensive search.</p>
</sec>
<sec id="s2_4">
<title>Data collection, items, and study selection</title>
<p>Based on the electronic database search strategy outlined above, two researchers conducted searches in both Chinese and English electronic databases. They used EndNote X7 software to identify and remove duplicate studies, integrated the search results from the different databases, created an information database, and downloaded the full texts of the relevant studies. Subsequently, two researchers independently performed preliminary screening and extracted data according to a pre-defined table. They cross-checked and reviewed the extracted data, recorded the reasons for excluding each study, and consulted third-party experts to resolve differing opinions and reach a final decision. The data extraction encompassed fundamental details from the included studies (e.g., first author and publication year), pertinent information about the experimental and control groups (such as case numbers, intervention measures, and outcome indicators), and the study design along with quality assessment data (including randomization methods, blinding procedures, allocation concealment, completeness of outcome data, selective reporting, and other sources of bias). The search strategy was as follows: (((Coherence Tomography, Optical[MeSH Terms]) OR Optical Coherence Tomography[MeSH Terms]) OR OCT Tomography[MeSH Terms]) OR Tomography, OCT[MeSH Terms] AND ((Diabetic Retinopathies[MeSH Terms]) OR Retinopathies, Diabetic[MeSH Terms]) OR (Retinopathy, Diabetic[MeSH Terms]) AND (Deep learning-based [MeSH Terms]) OR (Deep learning[MeSH Terms]).</p>
</sec>
<sec id="s2_5">
<title>Quality assessment</title>
<p>The methodological quality of the included studies was assessed using Cochrane&#x2019;s revised risk of bias tool for randomized trials (RoB 2.0) (<xref ref-type="bibr" rid="B15">15</xref>). This evaluation covered various aspects including the randomization process, deviations from intended interventions, missing outcome data, outcome measurement, and the selection of reported result areas. Each evaluation module consists of several signal questions, with possible responses being: Y (yes), PY (probably yes), PN (probably no), N (no), and NI (no information). Risk of bias was assessed independently by two reviewers, who discussed their findings in case of disagreement to come to a consensus. We do not provide further guidance as to the certainty of the evidence (e.g., using any kind of grading), but provide descriptive statistics of the individual and overall risk of bias together with meta-analytic estimates.</p>
</sec>
<sec id="s2_6">
<title>Statistical methods and data synthesis</title>
<p>First, the authors used RevMan 5.4 software to analyze the publication bias of the literature. Second, for the direct comparison results, the authors used Stata 17.0 software for data merging, statistical analysis and meta-analysis. In Stata 17.0, the meta package was used to perform meta-analysis. The relevant commands were executed to analyze data with both fixed-effect and random-effects models. The meta package provided functionalities for computing heterogeneity statistics, generating forest plots, and creating funnel plots. For meta-regression analysis to explore sources of heterogeneity, the metareg package was utilized with specific covariates. The analysis involved using these packages to compare different interventions and to map network meta-analysis results with random-effects model data. Significance was determined using P&#x2009;&lt;&#x2009;0.05 and 95% confidence intervals (95% CIs). For efficacy analysis, odds ratios (OR) were used for count data, while measurement data employed either the weighted mean difference or the standardized mean difference (mean difference, MD). Each effect size was reported with a 95% CI (<xref ref-type="bibr" rid="B16">16</xref>).</p>
</sec>
<sec id="s2_7">
<title>Assessment of heterogeneity</title>
<p>The heterogeneity was graded using I<sup>2</sup> according to the recommendations of the Cochrane Handbook (<xref ref-type="bibr" rid="B17">17</xref>). Cochrane&#x2019;s Q test was used to detect whether there was a significant difference in effect sizes between studies. The Q statistic followed a chi-squared distribution, and the P-value was used to determine the significance of heterogeneity. If the P-value was significant (typically &lt; 0.05), it indicated substantial heterogeneity. The I&#xb2;index statistic represented the percentage of total variation due to heterogeneity. The I&#xb2;index ranged from 0% to 100%, with higher values indicating greater heterogeneity. Generally, 0% to 25% suggested low heterogeneity, 25% to 50% indicated moderate heterogeneity, 50% to 75% indicated substantial heterogeneity, and 75% to 100% indicated considerable heterogeneity.</p>
<p>The clinical and methodological heterogeneity of the included studies was evaluated, and the levels of fit of the fixed-effects model and the random-effects model were compared (<xref ref-type="bibr" rid="B18">18</xref>). In the absence of significant clinical heterogeneity (P&#x2009;&#x2265;&#x2009;0.1, I<sup>2</sup>&#x2009;&#x2264;&#x2009;50%), a fixed-effects model was used for meta-analysis. If there was significant clinical heterogeneity between the results of each study (P&#x2009;&lt;&#x2009;0.1, I<sup>2</sup>&#x2009;&gt;&#x2009;50%), the source of the heterogeneity was first analyzed, the influence of clinical or methodological heterogeneity was excluded and the random-effects model was used for the meta-analysis. When the data provided by the clinical trial could not be meta-analyzed, they were subjected to a descriptive analysis.</p>
</sec>
<sec id="s2_8">
<title>Publication bias</title>
<p>According to the recommendations of the Cochrane Handbook, the RevMan 5.4 software was used to analyze potential publication bias (<xref ref-type="bibr" rid="B19">19</xref>). Typically, in the absence of publication bias, a funnel plot should appear symmetrical. If the funnel plot is asymmetrical, it may indicate the presence of publication bias. Egger&#x2019;s regression test was performed using Stata 17.0 to calculate publication bias. This test assesses the symmetry of the funnel plot through regression analysis to quantitatively detect publication bias. If the intercept of the regression significantly deviates from zero, it suggests the presence of publication bias (<xref ref-type="bibr" rid="B20">20</xref>).</p>
</sec>
</sec>
<sec id="s3" sec-type="results">
<title>Results</title>
<sec id="s3_1">
<title>Study selection and characteristics</title>
<p>From 478 identified studies, 258 were screened in full texts, and 10 studies were eventually included in our review and meta-analysis. The report flowchart is shown in <xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>. The treatment group included 8 artificial intelligence (AI)-based algorithms, the Inoveon Diabetic Retinopathy (DR-3DT) system and nonmydriatic ultra-widefield (NM UWF) screening. The basic characteristics of the included studies are shown in <xref ref-type="table" rid="T1">
<bold>Tables&#xa0;1</bold>
</xref> and <xref ref-type="table" rid="T2">
<bold>2</bold>
</xref>. Three studies focused on the analysis of retinal images, which used own data. Seven studies used publicly available data from cohort studies. 86% of the DL models were built using CNN algorithms, with one study using Inception-V4 and five studies using Inception-V3. The outcome measures of the studies all evaluated the accuracy and sensitivity of DL in monitoring diabetic retinopathy, furthermore, ETDRS macular edema stage the &#x201c;gold standard&#x201d; for the evaluation of diabetic retinopathy.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>The flow chart of literature screening.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fendo-16-1485311-g001.tif"/>
</fig>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Characteristics of the selected studies included in the systematic review and meta-analysis.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="center">Author(year)</th>
<th valign="top" align="center">Type of study</th>
<th valign="top" align="center">Treatment measures</th>
<th valign="top" align="center">Sample size (invention/control)</th>
<th valign="top" align="center">Age (years)</th>
<th valign="top" align="center">Outcome indicators</th>
<th valign="top" align="center">Treatment time</th>
<th valign="top" align="center">Reference</th>
<th valign="top" align="center">Jadad Scale</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="center">Risa M 2024</td>
<td valign="top" align="center">Randomized control trial</td>
<td valign="top" align="center">Artificial intelligence (AI)</td>
<td valign="top" align="center">164(81/83)</td>
<td valign="top" align="center">8-21</td>
<td valign="top" align="left">diabetic eye exam completion rate; the proportion of participants</td>
<td valign="top" align="center">6 months</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B21">21</xref>)</td>
<td valign="top" align="center">4</td>
</tr>
<tr>
<td valign="top" align="center">Selina L 2023</td>
<td valign="top" align="center">Randomized control trial</td>
<td valign="top" align="center">Nonmydriatic ultra-widefield (NM UWF) screening</td>
<td valign="top" align="center">658(335/323)</td>
<td valign="top" align="center">30-61</td>
<td valign="top" align="left">the proportions of AED between groups</td>
<td valign="top" align="center">12 months</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B22">22</xref>)</td>
<td valign="top" align="center">3</td>
</tr>
<tr>
<td valign="top" align="center">Stephen R 2002</td>
<td valign="top" align="center">Observational study</td>
<td valign="top" align="center">Inoveon Diabetic Retinopathy (DR-3DT) system</td>
<td valign="top" align="center">290(145/145)</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">Accuracy (sensitivity, specificity, predictive values) of the digital system</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B23">23</xref>)</td>
<td valign="top" align="center">4</td>
</tr>
<tr>
<td valign="top" align="center">Huma N 2022</td>
<td valign="top" align="center">Randomized control trial</td>
<td valign="top" align="center">Automated unsupervised deep learning</td>
<td valign="top" align="center">Data set: 3662</td>
<td valign="top" align="center">24-74</td>
<td valign="top" align="center">Accuracy (sensitivity, specificity, predictive values)</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B24">24</xref>)</td>
<td valign="top" align="center">3</td>
</tr>
<tr>
<td valign="top" align="center">Wang Y 2021</td>
<td valign="top" align="center">Observational study</td>
<td valign="top" align="center">Artificial intelligence (AI)-based algorithm</td>
<td valign="top" align="center">Data set:12252</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">Accuracy (sensitivity, specificity, predictive values)</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B25">25</xref>)</td>
<td valign="top" align="center">3</td>
</tr>
<tr>
<td valign="top" align="center">Alwakid G 2023</td>
<td valign="top" align="center">Observational study</td>
<td valign="top" align="center">Artificial intelligence (AI)-based algorithm</td>
<td valign="top" align="center">Data set:9952</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">Accuracy (sensitivity, specificity, predictive values)</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B26">26</xref>)</td>
<td valign="top" align="center">3</td>
</tr>
<tr>
<td valign="top" align="center">Mehboob A 2022</td>
<td valign="top" align="center">Observational study</td>
<td valign="top" align="center">Artificial intelligence (AI)-based algorithm</td>
<td valign="top" align="center">Data set:96213</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">Accuracy (sensitivity, specificity, predictive values)</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B27">27</xref>)</td>
<td valign="top" align="center">4</td>
</tr>
<tr>
<td valign="top" align="center">Li F 2022</td>
<td valign="top" align="center">Observational study</td>
<td valign="top" align="center">Artificial intelligence (AI)-based algorithm</td>
<td valign="top" align="center">Data set:8739</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">Accuracy (sensitivity, specificity, predictive values)</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B28">28</xref>)</td>
<td valign="top" align="center">3</td>
</tr>
<tr>
<td valign="top" align="center">Surya J 2023</td>
<td valign="top" align="center">Randomized control trial</td>
<td valign="top" align="center">Artificial intelligence (AI)-based algorithm</td>
<td valign="top" align="center">723(382/341)</td>
<td valign="top" align="center">35-65</td>
<td valign="top" align="center">Accuracy (sensitivity, specificity, predictive values)</td>
<td valign="top" align="center">6 months</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B29">29</xref>)</td>
<td valign="top" align="center">3</td>
</tr>
<tr>
<td valign="top" align="center">Mansour R 2017</td>
<td valign="top" align="center">Observational study</td>
<td valign="top" align="center">Artificial intelligence (AI)-based algorithm</td>
<td valign="top" align="center">Data set:35126</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">Accuracy (sensitivity, specificity, predictive values)</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B30">30</xref>)</td>
<td valign="top" align="center">4</td>
</tr>
<tr>
<td valign="top" align="center">Nunez d 2022</td>
<td valign="top" align="center">Observational study</td>
<td valign="top" align="center">Artificial intelligence (AI)-based algorithm</td>
<td valign="top" align="center">Data set:20489</td>
<td valign="top" align="center">53-67</td>
<td valign="top" align="center">Accuracy (sensitivity, specificity, predictive values)</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B3">3</xref>)</td>
<td valign="top" align="center">2</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Characteristics of the selected studies (Artificial intelligence (AI)-based algorithm).</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="center">Author (year)</th>
<th valign="top" align="center">Type of machine learning models</th>
<th valign="top" align="center">Imaging modality</th>
<th valign="top" align="center">Imaging pattern</th>
<th valign="top" align="center">Accuracy of result</th>
<th valign="top" align="center">Sensitivity of result</th>
<th valign="top" align="center">Reference</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="center">Risa M 2024</td>
<td valign="top" align="center">autonomous AI system</td>
<td valign="top" align="center">fundus images</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">100%</td>
<td valign="top" align="center">78.9%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B21">21</xref>)</td>
</tr>
<tr>
<td valign="top" align="center">Selina L 2023</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">OCT images</td>
<td valign="top" align="center">100&#xb0;, 200&#xb0;</td>
<td valign="top" align="center">99.1%</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B22">22</xref>)</td>
</tr>
<tr>
<td valign="top" align="center">Stephen R 2002</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">fundus images</td>
<td valign="top" align="center">30&#xb0;, 1152 &#xd7; 1152 pixels</td>
<td valign="top" align="center">98.2%</td>
<td valign="top" align="center">89.7%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B23">23</xref>)</td>
</tr>
<tr>
<td valign="top" align="center">Huma N 2022</td>
<td valign="top" align="center">the fuzzy clustering method, deep embedded clustering, and k-means for generalizability</td>
<td valign="top" align="center"/>
<td valign="top" align="center"/>
<td valign="top" align="center">98.66%</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B24">24</xref>)</td>
</tr>
<tr>
<td valign="top" align="center">Wang Y 2021</td>
<td valign="top" align="center">CNN</td>
<td valign="top" align="center">color fundus photography</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">90.6%</td>
<td valign="top" align="center">90.6%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B25">25</xref>)</td>
</tr>
<tr>
<td valign="top" align="center">Alwakid G 2023</td>
<td valign="top" align="center">Inception-V3, CNN</td>
<td valign="top" align="center">high-resolution retinal pictures</td>
<td valign="top" align="center">3216 &#xd7; 2136 pixels</td>
<td valign="top" align="center">98%</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B26">26</xref>)</td>
</tr>
<tr>
<td valign="top" align="center">Mehboob A 2022</td>
<td valign="top" align="center">RFT, CNN</td>
<td valign="top" align="center">color fundus photography</td>
<td valign="top" align="center">90&#xb0;, 180&#xb0;</td>
<td valign="top" align="center">83.78%</td>
<td valign="top" align="center">78.55%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B27">27</xref>)</td>
</tr>
<tr>
<td valign="top" align="center">Li F 2022</td>
<td valign="top" align="center">CNN</td>
<td valign="top" align="center">color fundus photography</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">90.21%</td>
<td valign="top" align="center">93.24%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B28">28</xref>)</td>
</tr>
<tr>
<td valign="top" align="center">Surya J 2023</td>
<td valign="top" align="center">CNN</td>
<td valign="top" align="center">underwent fundus photographs</td>
<td valign="top" align="center">45&#xb0;</td>
<td valign="top" align="center">89.75%</td>
<td valign="top" align="center">83.33%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B29">29</xref>)</td>
</tr>
<tr>
<td valign="top" align="center">Mansour R 2017</td>
<td valign="top" align="center">DNN, CNN</td>
<td valign="top" align="center">color fundus photography</td>
<td valign="top" align="center">&#x2013;</td>
<td valign="top" align="center">90.15%</td>
<td valign="top" align="center">91.3%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B30">30</xref>)</td>
</tr>
<tr>
<td valign="top" align="center">Nunez d 2022</td>
<td valign="top" align="center">CNN</td>
<td valign="top" align="center">color retinal images</td>
<td valign="top" align="center">40&#xb0;</td>
<td valign="top" align="center">92.56%</td>
<td valign="top" align="center">91.22%</td>
<td valign="top" align="center">(<xref ref-type="bibr" rid="B3">3</xref>)</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>CNN, Convolutional Neural Network; Inception-V3, CNN, Inception-V3 Convolutional Neural Network; RFT, Random Forest Tree; Inception-V4, Inception-V4 Convolutional Neural Network; Dr Noon AI, Doctor Noon Artificial Intelligence; DNN, Deep Neural Network; CNN, Convolutional Neural Network; VISUHEALTH-AI DR, VISUHEALTH Artificial Intelligence for Diabetic Retinopathy.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>Notably, many studies employed multiple test datasets. The reference test in the training dataset was established by two experts in 7 studies.</p>
</sec>
<sec id="s3_2">
<title>Risk of bias and applicability concerns</title>
<p>Among the 10 included studies, 4 were double-arm randomized controlled trials (RCTs) and 6 were observational studies. In the 7 evaluation modules, 5 were rated as low risk. In the assessment of missing outcome data and data integrity, all 10 studies were rated as low risk according to the RoB 2.0 evaluation results, indicating good quality and complete data in the included literature. However, two studies were assessed as high risk regarding participant details because they used cohort reporting and did not provide specific information on participant age and other demographics. The risk of research bias is expressed as a percentage of all the included studies, as shown in <xref ref-type="fig" rid="f2">
<bold>Figures&#xa0;2</bold>
</xref> and <xref ref-type="fig" rid="f3">
<bold>3</bold>
</xref>.</p>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Risk of bias graph in the included studies.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fendo-16-1485311-g002.tif"/>
</fig>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>Risk of bias summary in the included studies.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fendo-16-1485311-g003.tif"/>
</fig>
</sec>
<sec id="s3_3">
<title>Meta-analysis</title>
<p>Two meta-analyses were performed, one synthesizing the effectiveness of imaging to screen for Diabetic Eye Disease (<xref ref-type="fig" rid="f4">
<bold>Figure&#xa0;4</bold>
</xref>) and one on the proportion of Deep-learning-based automatic computer-aided diagnosis system for diabetic retinopathy (<xref ref-type="fig" rid="f5">
<bold>Figure&#xa0;5</bold>
</xref>).</p>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Forest plot of studies reporting the effectiveness of imaging to screen for Diabetic Eye Disease (primary outcome).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fendo-16-1485311-g004.tif"/>
</fig>
<p>In the comparison of the accuracy of diagnosing diabetic retinopathy using deep learning-based optical coherence tomography and retinal images, the results were shown in <xref ref-type="fig" rid="f4">
<bold>Figures&#xa0;4</bold>
</xref>, <xref ref-type="fig" rid="f5">
<bold>5</bold>
</xref>, and <xref ref-type="table" rid="T3">
<bold>Table&#xa0;3</bold>
</xref>. A random-effects model was used when I<sup>2</sup> &gt; 50%. The forest plot results showed that, compared to standard care, autonomous artificial intelligence improved the completion rate of diabetic eye exams in adolescents with diabetes [OR = 1.88, 95% CI = (1.45, 2.44), p = 0.031]. The overall detection accuracy with the assistance of artificial intelligence also showed significant improvement compared to traditional methods [OR = 1.33, 95% CI = (0.97, 1.84), p &lt; 0.001]. All of the outcomes of deep learning-based optical coherence tomography had ORs &#x2265;0.785, indicating that all included studies with artificial intelligence assistance produced good boosting results (<xref ref-type="table" rid="T3">
<bold>Table&#xa0;3</bold>
</xref>).</p>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>Forest plot of studies reporting the effectiveness of imaging to screen for Diabetic Eye Disease (secondary outcome).</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fendo-16-1485311-g005.tif"/>
</fig>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>The outcome of deep learning-based optical coherence tomography (OR,95%CI).</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="center">Id</th>
<th valign="top" align="center">OR lower</th>
<th valign="top" align="center">OR-OR lower</th>
<th valign="top" align="center">OR</th>
<th valign="top" align="center">OR upper-OR</th>
<th valign="top" align="center">OR upper</th>
<th valign="top" align="center">P-value</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="center">Huma N 2022 (<xref ref-type="bibr" rid="B24">24</xref>)</td>
<td valign="middle" align="center">0.9753</td>
<td valign="middle" align="center">0.0113</td>
<td valign="middle" align="center">0.9866</td>
<td valign="middle" align="center">0.0061</td>
<td valign="middle" align="center">0.9927</td>
<td valign="middle" align="center">&lt;0.001</td>
</tr>
<tr>
<td valign="top" align="center">Wang Y 2021 (<xref ref-type="bibr" rid="B25">25</xref>)</td>
<td valign="middle" align="center">0.9016</td>
<td valign="middle" align="center">0.0207</td>
<td valign="middle" align="center">0.9223</td>
<td valign="middle" align="center">0.0347</td>
<td valign="middle" align="center">0.957</td>
<td valign="middle" align="center">&lt;0.001</td>
</tr>
<tr>
<td valign="top" align="center">Alwakid G 2023 (<xref ref-type="bibr" rid="B26">26</xref>)</td>
<td valign="middle" align="center">0.808</td>
<td valign="middle" align="center">0.073</td>
<td valign="middle" align="center">0.881</td>
<td valign="middle" align="center">0.106</td>
<td valign="middle" align="center">0.987</td>
<td valign="middle" align="center">&lt;0.001</td>
</tr>
<tr>
<td valign="top" align="center">Mehboob A 2022 (<xref ref-type="bibr" rid="B27">27</xref>)</td>
<td valign="middle" align="center">0.7308</td>
<td valign="middle" align="center">0.0542</td>
<td valign="middle" align="center">0.785</td>
<td valign="middle" align="center">0.0528</td>
<td valign="middle" align="center">0.8378</td>
<td valign="middle" align="center">&lt;0.001</td>
</tr>
<tr>
<td valign="top" align="center">Li F 2022 (<xref ref-type="bibr" rid="B28">28</xref>)</td>
<td valign="middle" align="center">0.916</td>
<td valign="middle" align="center">0.009</td>
<td valign="middle" align="center">0.925</td>
<td valign="middle" align="center">0.011</td>
<td valign="middle" align="center">0.936</td>
<td valign="middle" align="center">&lt;0.001</td>
</tr>
<tr>
<td valign="top" align="center">Surya J 2023 (<xref ref-type="bibr" rid="B29">29</xref>)</td>
<td valign="middle" align="center">0.7755</td>
<td valign="middle" align="center">0.0745</td>
<td valign="middle" align="center">0.85</td>
<td valign="middle" align="center">0.0745</td>
<td valign="middle" align="center">0.9245</td>
<td valign="middle" align="center">&lt;0.001</td>
</tr>
<tr>
<td valign="top" align="center">Mansour R 2017 (<xref ref-type="bibr" rid="B30">30</xref>)</td>
<td valign="middle" align="center">0.9015</td>
<td valign="middle" align="center">0.04</td>
<td valign="middle" align="center">0.9415</td>
<td valign="middle" align="center">0.0378</td>
<td valign="middle" align="center">0.9793</td>
<td valign="middle" align="center">&lt;0.001</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Heterogeneity was assessed using funnel plots and the Egger test. The funnel plots in <xref ref-type="supplementary-material" rid="SF1">
<bold>Supplementary Figures S1</bold>
</xref> and <xref ref-type="supplementary-material" rid="SF3">
<bold>S3</bold>
</xref> were relatively symmetrical, with the effect sizes of the studies evenly distributed around the overall effect size. Egger&#x2019;s test, a regression test used for quantitatively assessing publication bias, showed p-values of 0.686 (&gt; 0.05) in <xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Figure S2</bold>
</xref> and 0.569 (&gt; 0.05) in <xref ref-type="supplementary-material" rid="SF4">
<bold>Supplementary Figure S4</bold>
</xref>, indicating that the asymmetry of the funnel plots was not significant and the likelihood of publication bias was low, suggesting no heterogeneity.</p>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<title>Discussion</title>
<p>Currently, the assessment of the severity of diabetic retinopathy in patients heavily relies on manual interpretation of retinal fundus images, which poses significant challenges (<xref ref-type="bibr" rid="B31">31</xref>). Therefore, automated image grading systems play a crucial role in the early diagnosis and evaluation of these vision-threatening diseases. For example, deep learning algorithms and image processing techniques can analyze large volumes of fundus images, providing consistent and highly accurate diagnostic results, reducing human error, and improving diagnostic accuracy (<xref ref-type="bibr" rid="B32">32</xref>&#x2013;<xref ref-type="bibr" rid="B34">34</xref>). By regularly collecting and analyzing patients&#x2019; fundus images, automated image grading systems can continuously monitor the progression of diabetic retinopathy, assisting doctors in timely adjusting treatment plans to achieve the best therapeutic outcomes (<xref ref-type="bibr" rid="B34">34</xref>). Multiple studies (<xref ref-type="bibr" rid="B35">35</xref>&#x2013;<xref ref-type="bibr" rid="B37">37</xref>) have shown that deep learning algorithms can be used to generate expert-level grading diagnoses for retinal fundus images. However, these methods often achieve good performance at the expense of increased time complexity. Due to the same input image size in these independent models, the robustness of their classification is relatively poor. Therefore, this study employs a systematic review and meta-analysis to analyze the role of deep learning-based optical coherence tomography and retinal images in the detection of diabetic retinopathy.</p>
<p>The results of this meta-analysis confirmed that, compared to standard care, autonomous artificial intelligence improved the completion rate of diabetic eye exams in adolescents with diabetes [OR=1.88, 95% CI=(1.45, 2.44), p=0.031]. Risa M et&#xa0;al. (<xref ref-type="bibr" rid="B21">21</xref>) were the first to assess the role of artificial intelligence in narrowing the care gap among racially and ethnically diverse adolescent diabetic patients. The study indicated that closing the care gap for diabetic eye exams, as measured by MIPS and HEDIS quality indicators, was a crucial component of value-based care. The results suggested that autonomous artificial intelligence could help meet these historically challenging benchmarks, particularly among racially/ethnically diverse and resource-limited youth. Li et&#xa0;al. (<xref ref-type="bibr" rid="B28">28</xref>) confirmed that, although deep learning (DL) detection often showed larger deviations at points such as the porion, subspinale, gonion, articulare, and anterior nasal spine, DL might not exceed expert detection accuracy but could clearly assist both regular and experienced examiners in landmark detection. Training models on larger datasets might have eventually helped achieve or surpass expert accuracy. The results indicated that DL models included in the studies achieved an accuracy above 83% for identifying diabetic retinopathy. A total of 71% of established DL research models had detection accuracies exceeding 90%. Mehboob A et&#xa0;al. (<xref ref-type="bibr" rid="B27">27</xref>) proposed a DL architecture consisting of three phases: image pre-processing, feature extraction, and classification. Deep convolutional networks (CNNs) were trained to extract deep features. Heat maps extracted from the proposed framework highlighted the presence of any exudates, microaneurysms, hemorrhages, cotton wool spots, or new vessels, indicating feature extraction from the affected region and achieving high accuracy. 
Deep CNNs could take unknown images as input and extract problem-specific features, thereby generating an appropriate response. The results showed that the proposed technique outperformed existing ones in terms of sensitivity. Even with a lighter CNN architecture, it demonstrated competitive accuracy. Moreover, among ensemble-based architectures, the proposed framework achieved the highest accuracy using average pooling when trained on an augmented dataset. F. Mansour Romany (<xref ref-type="bibr" rid="B30">30</xref>) used deep convolutional networks to classify data into normal and diseased categories with an accuracy of 97.93%.</p>
<p>Common deep learning ensemble algorithm classifiers include Random Forest, Support Vector Machines (SVM), Neural Networks, K-Nearest Neighbors (KNN), Multilayer Perceptrons, Naive Bayes, Decision Trees, and Logistic Regression. In 2021, an ensemble-based machine learning algorithm was proposed (<xref ref-type="bibr" rid="B38">38</xref>), which combined three different classifiers: Random Forest, Support Vector Machines (SVM), and Neural Networks, with a meta-classifier for decision-making. This ensemble-based approach enhanced the robustness and performance of the algorithm. The algorithm was tested on the Messidor dataset and achieved an accuracy of 0.75. Another ensemble-based algorithm for diabetic retinopathy screening was proposed by Nagi, A in 2021 (<xref ref-type="bibr" rid="B39">39</xref>). This algorithm employed a two-stage classifier, where the first stage consisted of outputs from six classifiers: SVM, KNN, Multilayer Perceptron, Naive Bayes, Decision Trees, and Logistic Regression, followed by a second stage using a Neural Network to make the final decision based on the classifier outputs. The algorithm achieved a test accuracy of 76.40% on the Messidor dataset. In 2020, an ensemble-based deep neural network architecture was established. This model used ResNet (<xref ref-type="bibr" rid="B40">40</xref>) and leveraged four ResNets to perform binary classification among five categories of diabetic retinopathy: normal vs. mild DR, normal vs. moderate DR, normal vs. severe DR, and normal vs. proliferative DR. The results from each classifier in Stage 1 were then processed by an AdaBoost classifier in Stage 2 to obtain the final classification results. The algorithm was evaluated on the Kaggle dataset APTOS 3662 retinal images, resulting in an accuracy of 61.9%.</p>
<p>The study results indicated that to enhance the accuracy of artificial intelligence in diabetic retinopathy detection and assessment, an automated algorithm should have followed a two-step strategy (<xref ref-type="bibr" rid="B41">41</xref>). The first method involved automatically defining the acceptability of retinal images to determine if they qualified for automatic grading, and then only applying the automated algorithm if the retinal images passed the acceptability test. The second method suggested that to ensure global applicability of automatic grading, the development of automated algorithms should have used images that reflected the specific acquisition conditions in real-world programs, allowing the model to understand and leverage these unique characteristics. Among the most commonly used DL models in research, CNN included two different convolutional neural network (CNN) architectures: Inception-v3 and Inception-v4 (<xref ref-type="bibr" rid="B42">42</xref>). These architectures had significant differences in design and performance. Inception-v3 enhanced feature extraction capabilities mainly through improved Inception modules, which included multiple parallel convolutional and pooling layers, as well as 1x1 convolutions to reduce computational complexity. It also introduced batch normalization and separable convolutions to accelerate training and improve efficiency. In contrast, Inception-v4 built upon Inception-v3 by integrating residual networks (Residual Networks), introducing Inception-ResNet and Reduction-ResNet modules (<xref ref-type="bibr" rid="B43">43</xref>&#x2013;<xref ref-type="bibr" rid="B45">45</xref>). These improvements gave Inception-v4 deeper network layers and better feature extraction capabilities, while residual connections addressed gradient vanishing issues in deep networks, enhancing training stability. 
Although Inception-v3 performed excellently in various computer vision tasks, Inception-v4 generally offered higher accuracy and faster training speed. Li et&#xa0;al. (<xref ref-type="bibr" rid="B28">28</xref>) developed an improved Inception-v4 network based on stem, inception, and reduction modules, and created an ensemble of five classification model instances based on this Inception-v4 network. Its performance level was comparable to or exceeded that of ophthalmologists, achieving excellent results on the primary dataset used. The detection accuracy was comparable to Inception-v3, but its responsiveness was notably higher than other Inception-v3-based DL models (<xref ref-type="bibr" rid="B25">25</xref>&#x2013;<xref ref-type="bibr" rid="B27">27</xref>).</p>
<p>However, as all studies tested on this same dataset (and most also trained on this dataset), we likely have high comparability but limited generalizability. Future studies should aim to test DL models on broad data, demonstrating robustness and generalizability. This review and the included studies have a number of limitations. First, the precision and recall results for some types of lesions in our study, which we mentioned above, were limited. More training data for these lesions should be collected to improve the performance of our model. Second, the established dataset was not necessarily a good representation of data from screening programs in clinical practice. Therefore, the built dataset was not sufficient to reflect the algorithm&#x2019;s performance in broader clinical use. Future studies should consider including a wider outcome set and aim to test DL applications comprehensively in other study designs and settings (e.g., observational studies in clinical care, randomized controlled trials).</p>
</sec>
<sec id="s5" sec-type="conclusions">
<title>Conclusion</title>
<p>DL shows relatively high accuracy for detection of diabetic retinopathy, whether using a self-trained DL model or choosing an established AI model. The majority of studies focused on CNN (Inception-V3) to develop DL models. The results showed that the accuracy of DL models in evaluating diabetic retinopathy was highly consistent across different studies and superior to the control group, with no heterogeneity observed. Further validation with larger datasets is needed, and it is hoped that more randomized controlled trials will be used for model validation, and the true value of using DL in clinical care needs to be demonstrated. Future research should focus on standardizing datasets, improving model interpretability, and validating performance across diverse populations.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="data-availability">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Material</bold>
</xref>. Further inquiries can be directed to the corresponding author.</p>
</sec>
<sec id="s7" sec-type="author-contributions">
<title>Author contributions</title>
<p>ZB: Conceptualization, Formal Analysis, Writing &#x2013; original draft. JL: Data curation, Methodology, Writing &#x2013; original draft. QL: Visualization, Writing &#x2013; original draft. ZF: Conceptualization, Funding acquisition, Supervision, Writing &#x2013; review &amp; editing.</p>
</sec>
<sec id="s8" sec-type="funding-information">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research and/or publication of this article. This study was supported by research grants from the Natural Science Foundation of China (82174153), the Anhui University Collaborative Innovation Project (GXXT-2020-025), and Hefei Comprehensive National Science Center Big Health Research Institute &#x201c;Open list&#x201d; project (2023CXMMTCM003). 2023 Anhui University Research Project (2023AH050867).</p>
</sec>
<sec id="s9" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s10" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s11" sec-type="supplementary-material">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fendo.2025.1485311/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fendo.2025.1485311/full#supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="Image1.tif" id="SF1" mimetype="image/tiff">
<label>Supplementary Figure&#xa0;1</label>
<caption>
<p>Funnel plot of studies reporting the effectiveness of imaging to screen for Diabetic Eye Disease (primary outcome).</p>
</caption>
</supplementary-material>
<supplementary-material xlink:href="Image2.tif" id="SM1" mimetype="image/tiff">
<label>Supplementary Figure&#xa0;2</label>
<caption>
<p>Egger-test of studies reporting the effectiveness of imaging to screen for Diabetic Eye Disease (primary outcome).</p>
</caption>
</supplementary-material>
<supplementary-material xlink:href="Image3.tif" id="SF3" mimetype="image/tiff">
<label>Supplementary Figure&#xa0;3</label>
<caption>
<p>Funnel plot of studies reporting the effectiveness of imaging to screen for Diabetic Eye Disease (secondary outcome).</p>
</caption>
</supplementary-material>
<supplementary-material xlink:href="Image4.tif" id="SF4" mimetype="image/tiff">
<label>Supplementary Figure&#xa0;4</label>
<caption>
<p>Egger-test of studies reporting the effectiveness of imaging to screen for Diabetic Eye Disease (secondary outcome).</p>
</caption>
</supplementary-material>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bhulakshmi</surname> <given-names>D</given-names>
</name>
<name>
<surname>Rajput</surname> <given-names>DS</given-names>
</name>
</person-group>. <article-title>A systematic review on diabetic retinopathy detection and classification based on deep learning techniques using fundus images</article-title>. <source>PeerJ Comput Sci</source>. (<year>2024</year>) <volume>10</volume>:<fpage>e1947</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.7717/peerj-cs.1947</pub-id>
</citation>
</ref>
<ref id="B2">
<label>2</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Usman</surname> <given-names>TM</given-names>
</name>
<name>
<surname>Saheed</surname> <given-names>YK</given-names>
</name>
<name>
<surname>Nsang</surname> <given-names>A</given-names>
</name>
<name>
<surname>Ajibesin</surname> <given-names>A</given-names>
</name>
<name>
<surname>Rakshit</surname> <given-names>S</given-names>
</name>
</person-group>. <article-title>A systematic literature review of machine learning based risk prediction models for diabetic retinopathy progression</article-title>. <source>Artif Intell Med</source>. (<year>2023</year>) <volume>143</volume>:<fpage>102617</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.artmed.2023.102617</pub-id>
</citation>
</ref>
<ref id="B3">
<label>3</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Nunez do Rio</surname> <given-names>JM</given-names>
</name>
<name>
<surname>Nderitu</surname> <given-names>P</given-names>
</name>
<name>
<surname>Bergeles</surname> <given-names>C</given-names>
</name>
<name>
<surname>Sivaprasad</surname> <given-names>S</given-names>
</name>
<name>
<surname>Tan</surname> <given-names>GSW</given-names>
</name>
<name>
<surname>Raman</surname> <given-names>R</given-names>
</name>
</person-group>. <article-title>Evaluating a deep learning diabetic retinopathy grading system developed on mydriatic retinal images when applied to non-mydriatic community screening</article-title>. <source>J Clin Med</source>. (<year>2022</year>) <volume>11</volume>:<fpage>614</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/jcm11030614</pub-id>
</citation>
</ref>
<ref id="B4">
<label>4</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mamtora</surname> <given-names>S</given-names>
</name>
<name>
<surname>Wong</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Bell</surname> <given-names>D</given-names>
</name>
<name>
<surname>Sandinha</surname> <given-names>T</given-names>
</name>
</person-group>. <article-title>Bilateral birdshot retinochoroiditis and retinal astrocytoma</article-title>. <source>Case Rep Ophthalmol Med</source>. (<year>2017</year>) <volume>2017</volume>:<fpage>6586157</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1155/2017/6586157</pub-id>
</citation>
</ref>
<ref id="B5">
<label>5</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Alyoubi</surname> <given-names>WL</given-names>
</name>
<name>
<surname>Shalash</surname> <given-names>WM</given-names>
</name>
<name>
<surname>Abulkhair</surname> <given-names>MF</given-names>
</name>
</person-group>. <article-title>Diabetic retinopathy detection through deep learning techniques: A review</article-title>. <source>Inf Med Unlocked</source>. (<year>2020</year>) <volume>20</volume>:<fpage>100377</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.imu.2020.100377</pub-id>
</citation>
</ref>
<ref id="B6">
<label>6</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Willis</surname> <given-names>JR</given-names>
</name>
<name>
<surname>Doan</surname> <given-names>QV</given-names>
</name>
<name>
<surname>Gleeson</surname> <given-names>M</given-names>
</name>
<name>
<surname>Haskova</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Ramulu</surname> <given-names>P</given-names>
</name>
<name>
<surname>Morse</surname> <given-names>L</given-names>
</name>
<etal/>
</person-group>. <article-title>Vision-related functional burden of diabetic retinopathy across severity levels in the United States</article-title>. <source>JAMA Ophthalmol</source>. (<year>2017</year>) <volume>135</volume>:<page-range>926&#x2013;32</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1001/jamaophthalmol.2017.2553</pub-id>
</citation>
</ref>
<ref id="B7">
<label>7</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Murugesan</surname> <given-names>N</given-names>
</name>
<name>
<surname>Ustunkaya</surname> <given-names>T</given-names>
</name>
<name>
<surname>Feener</surname> <given-names>EP</given-names>
</name>
</person-group>. <article-title>Thrombosis and hemorrhage in diabetic retinopathy: A perspective from an inflammatory standpoint</article-title>. <source>Semin Thromb Hemost</source>. (<year>2015</year>) <volume>41</volume>:<page-range>659&#x2013;64</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1055/s-0035-1556731</pub-id>
</citation>
</ref>
<ref id="B8">
<label>8</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dubow</surname> <given-names>M</given-names>
</name>
<name>
<surname>Pinhas</surname> <given-names>A</given-names>
</name>
<name>
<surname>Shah</surname> <given-names>N</given-names>
</name>
<name>
<surname>Cooper</surname> <given-names>RF</given-names>
</name>
<name>
<surname>Gan</surname> <given-names>A</given-names>
</name>
<name>
<surname>Gentile</surname> <given-names>RC</given-names>
</name>
<etal/>
</person-group>. <article-title>Classification of human retinal microaneurysms using adaptive optics scanning light ophthalmoscope fluorescein angiography</article-title>. <source>Invest Ophthalmol Vis Sci</source>. (<year>2014</year>) <volume>55</volume>:<page-range>1299&#x2013;309</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1167/iovs.13-13122</pub-id>
</citation>
</ref>
<ref id="B9">
<label>9</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pavlovicova</surname> <given-names>J</given-names>
</name>
<name>
<surname>Macsik</surname> <given-names>P</given-names>
</name>
<name>
<surname>Goga</surname> <given-names>J</given-names>
</name>
<name>
<surname>Kajan</surname> <given-names>S</given-names>
</name>
</person-group>. <article-title>Local binary CNN for diabetic retinopathy classification on fundus images</article-title>. <source>Acta Polytechnica Hungarica</source>. (<year>2022</year>) <volume>19</volume>:<fpage>19</fpage>.</citation>
</ref>
<ref id="B10">
<label>10</label>
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Taylor</surname> <given-names>R</given-names>
</name>
<name>
<surname>Batey</surname> <given-names>D</given-names>
</name>
</person-group>. <source>Handbook of retinal screening in diabetes: diagnosis and management</source>. <publisher-name>John Wiley &amp; Sons</publisher-name> (<year>2012</year>). Available online at: <uri xlink:href="https://www.wiley.com/go/taylor/retinalscreening">www.wiley.com/go/taylor/retinalscreening</uri>.</citation>
</ref>
<ref id="B11">
<label>11</label>
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Kazakh-British</surname> <given-names>NP</given-names>
</name>
<name>
<surname>Pak</surname> <given-names>A</given-names>
</name>
<name>
<surname>Abdullina</surname> <given-names>D</given-names>
</name>
</person-group>. (<year>2018</year>). <article-title>Automatic detection of blood vessels and classification in retinal images for diabetic retinopathy diagnosis with application of convolution neural network</article-title>, in: <conf-name>Proceedings of the 2018 international conference on sensors, signal and image processing</conf-name>, <publisher-name>ACM</publisher-name>. pp. <page-range>60&#x2013;3</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1145/3290589.3290596</pub-id>
</citation>
</ref>
<ref id="B12">
<label>12</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pak</surname> <given-names>A</given-names>
</name>
<name>
<surname>Ziyaden</surname> <given-names>A</given-names>
</name>
<name>
<surname>Tukeshev</surname> <given-names>K</given-names>
</name>
<name>
<surname>Jaxylykova</surname> <given-names>A</given-names>
</name>
<name>
<surname>Abdullina</surname> <given-names>D</given-names>
</name>
</person-group>. <article-title>Comparative analysis of deep learning methods of detection of diabetic retinopathy</article-title>. <source>Cogent Eng</source>. (<year>2020</year>) <volume>7</volume>:<fpage>1805144</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1080/23311916.2020.1805144</pub-id>
</citation>
</ref>
<ref id="B13">
<label>13</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Khalifa</surname> <given-names>NEM</given-names>
</name>
<name>
<surname>Loey</surname> <given-names>M</given-names>
</name>
<name>
<surname>Taha</surname> <given-names>MHN</given-names>
</name>
<name>
<surname>Mohamed</surname> <given-names>HNET</given-names>
</name>
</person-group>. <article-title>Deep transfer learning models for medical diabetic retinopathy detection</article-title>. <source>Acta Informatica Med</source>. (<year>2019</year>) <volume>27</volume>:<fpage>327</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.5455/aim.2019.27.327-332</pub-id>
</citation>
</ref>
<ref id="B14">
<label>14</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Moher</surname> <given-names>D</given-names>
</name>
<name>
<surname>Liberati</surname> <given-names>A</given-names>
</name>
<name>
<surname>Tetzlaff</surname> <given-names>J</given-names>
</name>
<name>
<surname>Altman</surname> <given-names>DG</given-names>
</name>
<collab>PRISMA Group</collab>
</person-group>. <article-title>Preferred reporting items for systematic reviews and meta-analyses: the PRISMA statement</article-title>. <source>Ann Intern Med</source>. (<year>2009</year>) <volume>151</volume>:<fpage>264</fpage>&#x2013;<lpage>9, W64</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1136/bmj.b2535</pub-id>
</citation>
</ref>
<ref id="B15">
<label>15</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hongqiu</surname> <given-names>G</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>W</given-names>
</name>
<name>
<surname>Wei</surname> <given-names>L</given-names>
</name>
</person-group>. <article-title>Application of Cochrane bias risk assessment tool in meta-analysis of randomized controlled study</article-title>. <source>Chin Circ J</source>. (<year>2014</year>) <volume>29</volume>:<page-range>147&#x2013;8</page-range>. Available online at: <uri xlink:href="https://kns.cnki.net/kcms2/article/abstract?v=DnpHqYycDUOhULxQkdutHSjpEo1q2fnPMGquCcUGdAhttcMOp1PeKgUjzr0BP5J_niVqznhs6RUCXB_Z2PjbIVF6XuR0KY3jGCmNC5wBUAcad6ID8Hunoy9VVaea131k88AVTQG9EjznitxiMCUa1aw6B89As-YU64QKAWKm15S3Uesf7gqjP7FtNqiLoR">https://kns.cnki.net/kcms2/article/abstract?v=DnpHqYycDUOhULxQkdutHSjpEo1q2fnPMGquCcUGdAhttcMOp1PeKgUjzr0BP5J_niVqznhs6RUCXB_Z2PjbIVF6XuR0KY3jGCmNC5wBUAcad6ID8Hunoy9VVaea131k88AVTQG9EjznitxiMCUa1aw6B89As-YU64QKAWKm15S3Uesf7gqjP7FtNqiLoR</uri>.</citation>
</ref>
<ref id="B16">
<label>16</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chao</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Feng</surname> <given-names>S</given-names>
</name>
<name>
<surname>Xiantao</surname> <given-names>Z</given-names>
</name>
</person-group>. <article-title>R software calls JAGS software to realize network meta-analysis</article-title>. <source>Chin J Evid Based Med</source>. (<year>2014</year>) <volume>14</volume>:<page-range>241&#x2013;8</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.7507/1672-2531.20140042</pub-id>
</citation>
</ref>
<ref id="B17">
<label>17</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dan</surname> <given-names>W</given-names>
</name>
<name>
<surname>Junxia</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Zhenyun</surname> <given-names>M</given-names>
</name>
</person-group>. <article-title>Heterogeneity and its treatment in meta analysis</article-title>. <source>Chin J Evid Based Med</source>. (<year>2009</year>) <volume>9</volume>:<page-range>1115&#x2013;8</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.7507/1672-2531.20090196</pub-id>
</citation>
</ref>
<ref id="B18">
<label>18</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Higgins</surname> <given-names>JP</given-names>
</name>
<name>
<surname>Thompson</surname> <given-names>SG</given-names>
</name>
</person-group>. <article-title>Quantifying heterogeneity in a meta-analysis</article-title>. <source>Stat Med</source>. (<year>2002</year>) <volume>21</volume>:<page-range>1539&#x2013;58</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/sim.1186</pub-id>
</citation>
</ref>
<ref id="B19">
<label>19</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Van Valkenhoef</surname> <given-names>G</given-names>
</name>
<name>
<surname>Tervonen</surname> <given-names>T</given-names>
</name>
<name>
<surname>Zwinkels</surname> <given-names>T</given-names>
</name>
<name>
<surname>De Brock</surname> <given-names>B</given-names>
</name>
<name>
<surname>Hillege</surname> <given-names>H</given-names>
</name>
</person-group>. <article-title>ADDIS: a decision support system for evidence-based medicine</article-title>. <source>Decision Support Syst</source>. (<year>2013</year>) <volume>55</volume>:<page-range>459&#x2013;75</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.dss.2012.10.005</pub-id>
</citation>
</ref>
<ref id="B20">
<label>20</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Viechtbauer</surname> <given-names>W</given-names>
</name>
</person-group>. <article-title>Conducting meta-analyses in R with the metafor package</article-title>. <source>J Stat Softw</source>. (<year>2010</year>) <volume>36</volume>:<fpage>1</fpage>&#x2013;<lpage>48</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.18637/jss.v036.i03</pub-id>
</citation>
</ref>
<ref id="B21">
<label>21</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wolf</surname> <given-names>RM</given-names>
</name>
<name>
<surname>Channa</surname> <given-names>R</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>TYA</given-names>
</name>
<name>
<surname>Zehra</surname> <given-names>A</given-names>
</name>
<name>
<surname>Bromberger</surname> <given-names>L</given-names>
</name>
<name>
<surname>Patel</surname> <given-names>D</given-names>
</name>
<etal/>
</person-group>. <article-title>Autonomous artificial intelligence increases screening and follow-up for diabetic retinopathy in youth: the ACCESS randomized control trial</article-title>. <source>Nat Commun</source>. (<year>2024</year>) <volume>15</volume>:<fpage>421</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41467-023-44676-z</pub-id>
</citation>
</ref>
<ref id="B22">
<label>22</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname> <given-names>SL</given-names>
</name>
<name>
<surname>Gonder</surname> <given-names>JR</given-names>
</name>
<name>
<surname>Owrangi</surname> <given-names>E</given-names>
</name>
<name>
<surname>Klar</surname> <given-names>NS</given-names>
</name>
<name>
<surname>Hramiak</surname> <given-names>IM</given-names>
</name>
<name>
<surname>Uvarov</surname> <given-names>A</given-names>
</name>
<etal/>
</person-group>. <article-title>Effectiveness of nonmydriatic ultra-widefield retinal imaging to screen for diabetic eye disease: A randomized controlled trial (Clearsight)</article-title>. <source>Diabetes Care</source>. (<year>2023</year>) <volume>46</volume>:<fpage>399</fpage>&#x2013;<lpage>407</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.2337/dc22-0713</pub-id>
</citation>
</ref>
<ref id="B23">
<label>23</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fransen</surname> <given-names>SR</given-names>
</name>
<name>
<surname>Leonard-Martin</surname> <given-names>TC</given-names>
</name>
<name>
<surname>Feuer</surname> <given-names>WJ</given-names>
</name>
<name>
<surname>Hildebrand</surname> <given-names>PL</given-names>
</name>
<collab>Inoveon Health Research Group</collab>
</person-group>. <article-title>Clinical evaluation of patients with diabetic retinopathy: accuracy of the Inoveon diabetic retinopathy-3DT system</article-title>. <source>Ophthalmology</source>. (<year>2002</year>) <volume>109</volume>:<fpage>595</fpage>&#x2013;<lpage>601</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/S0161-6420(01)00990-3</pub-id>
</citation>
</ref>
<ref id="B24">
<label>24</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Naz</surname> <given-names>H</given-names>
</name>
<name>
<surname>Nijhawan</surname> <given-names>R</given-names>
</name>
<name>
<surname>Ahuja</surname> <given-names>NJ</given-names>
</name>
</person-group>. <article-title>An automated unsupervised deep learning-based approach for diabetic retinopathy detection</article-title>. <source>Med Biol Eng Comput</source>. (<year>2022</year>) <volume>60</volume>:<page-range>3635&#x2013;54</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11517-022-02688-9</pub-id>
</citation>
</ref>
<ref id="B25">
<label>25</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Yu</surname> <given-names>M</given-names>
</name>
<name>
<surname>Hu</surname> <given-names>B</given-names>
</name>
<name>
<surname>Jin</surname> <given-names>X</given-names>
</name>
<name>
<surname>Li</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>X</given-names>
</name>
<etal/>
</person-group>. <article-title>Deep learning-based detection and stage grading for optimising diagnosis of diabetic retinopathy</article-title>. <source>Diabetes Metab Res Rev</source>. (<year>2021</year>) <volume>37</volume>:<fpage>e3445</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/dmrr.3445</pub-id>
</citation>
</ref>
<ref id="B26">
<label>26</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Alwakid</surname> <given-names>G</given-names>
</name>
<name>
<surname>Gouda</surname> <given-names>W</given-names>
</name>
<name>
<surname>Humayun</surname> <given-names>M</given-names>
</name>
</person-group>. <article-title>Deep Learning-based prediction of Diabetic Retinopathy using CLAHE and ESRGAN for Enhancement</article-title>. <source>Healthc MDPI</source>. (<year>2023</year>) <volume>11</volume>:<fpage>863</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/healthcare11060863</pub-id>
</citation>
</ref>
<ref id="B27">
<label>27</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mehboob</surname> <given-names>A</given-names>
</name>
<name>
<surname>Akram</surname> <given-names>MU</given-names>
</name>
<name>
<surname>Alghamdi</surname> <given-names>NS</given-names>
</name>
<name>
<surname>Abdul Salam</surname> <given-names>A</given-names>
</name>
</person-group>. <article-title>A deep learning based approach for grading of diabetic retinopathy using large fundus image dataset</article-title>. <source>Diagnostics</source>. (<year>2022</year>) <volume>12</volume>:<fpage>3084</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/diagnostics12123084</pub-id>
</citation>
</ref>
<ref id="B28">
<label>28</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname> <given-names>F</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Xu</surname> <given-names>T</given-names>
</name>
<name>
<surname>Dong</surname> <given-names>L</given-names>
</name>
<name>
<surname>Yan</surname> <given-names>L</given-names>
</name>
<name>
<surname>Jiang</surname> <given-names>M</given-names>
</name>
<etal/>
</person-group>. <article-title>Deep learning-based automated detection for diabetic retinopathy and diabetic macular oedema in retinal fundus photographs</article-title>. <source>Eye (Lond)</source>. (<year>2022</year>) <volume>36</volume>:<page-range>1433&#x2013;41</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41433-021-01552-8</pub-id>
</citation>
</ref>
<ref id="B29">
<label>29</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Surya</surname> <given-names>J</given-names>
</name>
<name>
<surname>Pandy</surname> <given-names>N</given-names>
</name>
<name>
<surname>Rim</surname> <given-names>TH</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>G</given-names>
</name>
<name>
<surname>Priya</surname> <given-names>MS</given-names>
</name>
<name>
<surname>Subramanian</surname> <given-names>B</given-names>
</name>
<etal/>
</person-group>. <article-title>Efficacy of deep learning-based artificial intelligence models in screening and referring patients with diabetic retinopathy and glaucoma</article-title>. <source>Indian J Ophthalmol</source>. (<year>2023</year>) <volume>71</volume>:<page-range>3039&#x2013;45</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.4103/IJO.IJO_11_23</pub-id>
</citation>
</ref>
<ref id="B30">
<label>30</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mansour</surname> <given-names>RF</given-names>
</name>
</person-group>. <article-title>Deep-learning-based automatic computer-aided diagnosis system for diabetic retinopathy</article-title>. <source>BioMed Eng Lett</source>. (<year>2018</year>) <volume>8</volume>:<fpage>41</fpage>&#x2013;<lpage>57</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s13534-017-0047-y</pub-id>
</citation>
</ref>
<ref id="B31">
<label>31</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gadekallu</surname> <given-names>TR</given-names>
</name>
<name>
<surname>Khare</surname> <given-names>N</given-names>
</name>
<name>
<surname>Bhattacharya</surname> <given-names>S</given-names>
</name>
<name>
<surname>Singh</surname> <given-names>S</given-names>
</name>
<name>
<surname>Maddikunta</surname> <given-names>PKR</given-names>
</name>
<name>
<surname>Srivastava</surname> <given-names>G</given-names>
</name>
</person-group>. <article-title>Deep neural networks to predict diabetic retinopathy</article-title>. <source>J Ambient Intell Humanized Comput</source>. (<year>2023</year>) <volume>14</volume>:<page-range>5407&#x2013;20</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s12652-020-01963-7</pub-id>
</citation>
</ref>
<ref id="B32">
<label>32</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rajalakshmi</surname> <given-names>R</given-names>
</name>
<name>
<surname>Subashini</surname> <given-names>R</given-names>
</name>
<name>
<surname>Anjana</surname> <given-names>RM</given-names>
</name>
<name>
<surname>Mohan</surname> <given-names>V</given-names>
</name>
</person-group>. <article-title>Automated diabetic retinopathy detection in smartphone-based fundus photography using artificial intelligence</article-title>. <source>Eye (Lond)</source>. (<year>2018</year>) <volume>32</volume>:<page-range>1138&#x2013;44</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41433-018-0064-9</pub-id>
</citation>
</ref>
<ref id="B33">
<label>33</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>de La Torre</surname> <given-names>J</given-names>
</name>
<name>
<surname>Valls</surname> <given-names>A</given-names>
</name>
<name>
<surname>Puig</surname> <given-names>D</given-names>
</name>
</person-group>. <article-title>A deep learning interpretable classifier for diabetic retinopathy disease grading</article-title>. <source>Neurocomputing</source>. (<year>2020</year>) <volume>396</volume>:<page-range>465&#x2013;76</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.neucom.2018.07.102</pub-id>
</citation>
</ref>
<ref id="B34">
<label>34</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kermany</surname> <given-names>DS</given-names>
</name>
<name>
<surname>Goldbaum</surname> <given-names>M</given-names>
</name>
<name>
<surname>Cai</surname> <given-names>W</given-names>
</name>
<name>
<surname>Valentim</surname> <given-names>CCS</given-names>
</name>
<name>
<surname>Liang</surname> <given-names>H</given-names>
</name>
<name>
<surname>Baxter</surname> <given-names>SL</given-names>
</name>
<etal/>
</person-group>. <article-title>Identifying medical diagnoses and treatable diseases by image-based deep learning</article-title>. <source>Cell</source>. (<year>2018</year>) <volume>172</volume>:<fpage>1122</fpage>&#x2013;<lpage>1131 e9</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.cell.2018.02.010</pub-id>
</citation>
</ref>
<ref id="B35">
<label>35</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sayres</surname> <given-names>R</given-names>
</name>
<name>
<surname>Taly</surname> <given-names>A</given-names>
</name>
<name>
<surname>Rahimy</surname> <given-names>E</given-names>
</name>
<name>
<surname>Blumer</surname> <given-names>K</given-names>
</name>
<name>
<surname>Coz</surname> <given-names>D</given-names>
</name>
<name>
<surname>Hammel</surname> <given-names>N</given-names>
</name>
<etal/>
</person-group>. <article-title>Using a deep learning algorithm and integrated gradients explanation to assist grading for diabetic retinopathy</article-title>. <source>Ophthalmology</source>. (<year>2019</year>) <volume>126</volume>:<page-range>552&#x2013;64</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ophtha.2018.11.016</pub-id>
</citation>
</ref>
<ref id="B36">
<label>36</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Araujo</surname> <given-names>T</given-names>
</name>
<name>
<surname>Aresta</surname> <given-names>G</given-names>
</name>
<name>
<surname>Mendonca</surname> <given-names>L</given-names>
</name>
<name>
<surname>Penas</surname> <given-names>S</given-names>
</name>
<name>
<surname>Maia</surname> <given-names>C</given-names>
</name>
<name>
<surname>Carneiro</surname> <given-names>A</given-names>
</name>
<etal/>
</person-group>. <article-title>DR|GRADUATE: Uncertainty-aware deep learning-based diabetic retinopathy grading in eye fundus images</article-title>. <source>Med Image Anal</source>. (<year>2020</year>) <volume>63</volume>:<fpage>101715</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.media.2020.101715</pub-id>
</citation>
</ref>
<ref id="B37">
<label>37</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>He</surname> <given-names>J</given-names>
</name>
<name>
<surname>Cao</surname> <given-names>T</given-names>
</name>
<name>
<surname>Xu</surname> <given-names>F</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>S</given-names>
</name>
<name>
<surname>Tao</surname> <given-names>H</given-names>
</name>
<name>
<surname>Wu</surname> <given-names>T</given-names>
</name>
<etal/>
</person-group>. <article-title>Artificial intelligence-based screening for diabetic retinopathy at community hospital</article-title>. <source>Eye</source>. (<year>2020</year>) <volume>34</volume>:<page-range>572&#x2013;6</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41433-019-0562-4</pub-id>
</citation>
</ref>
<ref id="B38">
<label>38</label>
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Odeh</surname> <given-names>I</given-names>
</name>
<name>
<surname>Alkasassbeh</surname> <given-names>M</given-names>
</name>
<name>
<surname>Alauthman</surname> <given-names>M</given-names>
</name>
</person-group>. (<year>2021</year>). <article-title>Diabetic retinopathy detection using ensemble machine learning</article-title>, in: <conf-name>2021 international conference on information technology (ICIT)</conf-name>, . pp. <page-range>173&#x2013;8</page-range>. <publisher-name>IEEE</publisher-name>. Available online at: <uri xlink:href="https://ieeexplore.ieee.org/abstract/document/9491645">https://ieeexplore.ieee.org/abstract/document/9491645</uri>.</citation>
</ref>
<ref id="B39">
<label>39</label>
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Nagi</surname> <given-names>AT</given-names>
</name>
<name>
<surname>Awan</surname> <given-names>MJ</given-names>
</name>
<name>
<surname>Javed</surname> <given-names>R</given-names>
</name>
<name>
<surname>Ayesha</surname> <given-names>N</given-names>
</name>
</person-group>. (<year>2021</year>). <article-title>A comparison of two-stage classifier algorithm with ensemble techniques on detection of diabetic retinopathy</article-title>, in: <conf-name>2021 1st International Conference on Artificial Intelligence and Data Analytics (CAIDA)</conf-name>, . pp. <page-range>212&#x2013;5</page-range>. <publisher-name>IEEE</publisher-name>. Available online at: <uri xlink:href="https://ieeexplore.ieee.org/abstract/document/9425129">https://ieeexplore.ieee.org/abstract/document/9425129</uri>.</citation>
</ref>
<ref id="B40">
<label>40</label>
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Sridhar</surname> <given-names>S</given-names>
</name>
<name>
<surname>Sanagavarapu</surname> <given-names>S</given-names>
</name>
</person-group>. (<year>2020</year>). <article-title>Detection and prognosis evaluation of diabetic retinopathy using ensemble deep convolutional neural networks</article-title>, in: <conf-name>2020 International Electronics Symposium (IES)</conf-name>, . pp. <fpage>78</fpage>&#x2013;<lpage>85</lpage>. <publisher-name>IEEE</publisher-name>. Available online at: <uri xlink:href="https://ieeexplore.ieee.org/abstract/document/9231789">https://ieeexplore.ieee.org/abstract/document/9231789</uri>.</citation>
</ref>
<ref id="B41">
<label>41</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Nderitu</surname> <given-names>P</given-names>
</name>
<name>
<surname>do Rio</surname> <given-names>JMN</given-names>
</name>
<name>
<surname>Rasheed</surname> <given-names>R</given-names>
</name>
<name>
<surname>Raman</surname> <given-names>R</given-names>
</name>
<name>
<surname>Rajalakshmi</surname> <given-names>R</given-names>
</name>
<name>
<surname>Bergeles</surname> <given-names>C</given-names>
</name>
<etal/>
</person-group>. <article-title>Deep learning for gradability classification of handheld, non-mydriatic retinal images</article-title>. <source>Sci Rep</source>. (<year>2021</year>) <volume>11</volume>:<fpage>9469</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-021-89027-4</pub-id>
</citation>
</ref>
<ref id="B42">
<label>42</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Haenssle</surname> <given-names>HA</given-names>
</name>
<name>
<surname>Fink</surname> <given-names>C</given-names>
</name>
<name>
<surname>Uhlmann</surname> <given-names>L</given-names>
</name>
</person-group>. <article-title>Reply to the letter to the Editor &#x201c;Reply to &#x2018;Man against machine: diagnostic performance of a deep learning convolutional neural network for dermoscopic melanoma recognition in comparison to 58 dermatologists&#x2019; by H. A. Haenssle et&#xa0;al. &#x201c; by L. Oakden-Rayner</article-title>. <source>Ann Oncol</source>. (<year>2019</year>) <volume>30</volume>:<fpage>130e</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1093/annonc/mdy520</pub-id>
</citation>
</ref>
<ref id="B43">
<label>43</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Koshy</surname> <given-names>R</given-names>
</name>
<name>
<surname>Mahmood</surname> <given-names>A</given-names>
</name>
</person-group>. <article-title>Optimizing deep CNN architectures for face liveness detection</article-title>. <source>Entropy (Basel)</source>. (<year>2019</year>) <volume>21</volume>:<fpage>423</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/e21040423</pub-id>
</citation>
</ref>
<ref id="B44">
<label>44</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sahoo</surname> <given-names>PK</given-names>
</name>
<name>
<surname>Mohapatra</surname> <given-names>S</given-names>
</name>
<name>
<surname>Wu</surname> <given-names>CY</given-names>
</name>
<name>
<surname>Huang</surname> <given-names>KL</given-names>
</name>
<name>
<surname>Chang</surname> <given-names>TY</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>TH</given-names>
</name>
</person-group>. <article-title>Automatic identification of early ischemic lesions on non-contrast CT with deep learning approach</article-title>. <source>Sci Rep</source>. (<year>2022</year>) <volume>12</volume>:<fpage>18054</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-022-22939-x</pub-id>
</citation>
</ref>
<ref id="B45">
<label>45</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mohapatra</surname> <given-names>S</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>TH</given-names>
</name>
<name>
<surname>Sahoo</surname> <given-names>PK</given-names>
</name>
<name>
<surname>Wu</surname> <given-names>CY</given-names>
</name>
</person-group>. <article-title>Localization of early infarction on non-contrast CT images in acute ischemic stroke with deep learning approach</article-title>. <source>Sci Rep</source>. (<year>2023</year>) <volume>13</volume>:<fpage>19442</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-023-45573-7</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>