<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="editorial" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Cell Dev. Biol.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Cell and Developmental Biology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Cell Dev. Biol.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2296-634X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1762437</article-id>
<article-id pub-id-type="doi">10.3389/fcell.2026.1762437</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Editorial</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Editorial: Artificial intelligence applications in chronic ocular diseases, volume II</article-title>
<alt-title alt-title-type="left-running-head">Yang et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2026.1762437">10.3389/fcell.2026.1762437</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Yang</surname>
<given-names>Weihua</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1439952"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Fang</surname>
<given-names>Huihui</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2152748"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Xu</surname>
<given-names>Yanwu</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1491541"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Chi</surname>
<given-names>Wei</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1081502"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
</contrib-group>
<aff id="aff1">
<label>1</label>
<institution>Shenzhen Eye Hospital, Shenzhen Eye Medical Center, Southern Medical University</institution>, <city>Shenzhen</city>, <country country="CN">China</country>
</aff>
<aff id="aff2">
<label>2</label>
<institution>Pazhou Lab</institution>, <city>Guangzhou</city>, <state>Guangdong</state>, <country country="CN">China</country>
</aff>
<aff id="aff3">
<label>3</label>
<institution>School of Future Technology, South China University of Technology</institution>, <city>Guangzhou</city>, <state>Guangdong</state>, <country country="CN">China</country>
</aff>
<author-notes>
<corresp id="c001">
<label>&#x2a;</label>Correspondence: Yanwu Xu, <email xlink:href="mailto:ywxu@ieee.org">ywxu@ieee.org</email>; Wei Chi, <email xlink:href="mailto:chiwei@mail.sysu.edu.cn">chiwei@mail.sysu.edu.cn</email>
</corresp>
<fn fn-type="equal" id="fn001">
<label>&#x2020;</label>
<p>These authors have contributed equally to this work</p>
</fn>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-24">
<day>24</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>14</volume>
<elocation-id>1762437</elocation-id>
<history>
<date date-type="received">
<day>07</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>09</day>
<month>02</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>13</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Yang, Fang, Xu and Chi.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Yang, Fang, Xu and Chi</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-24">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<kwd-group>
<kwd>artificial intelligence in ophthalmology</kwd>
<kwd>chronic ocular diseases</kwd>
<kwd>clinical workflow integration</kwd>
<kwd>multimodal imaging and analysis</kwd>
<kwd>ocular biomarkers and systemic diseases</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by Guangdong Basic and Applied Basic Research Foundation (2025A1515011627), National Natural Science Foundation of China (82571272), Sanming Project of Medicine in Shenzhen (SZSM202311012), Shenzhen Science and Technology Program (JCYJ20240813152704006), and Guangzhou Science and Technology Project (2024D03J0013).</funding-statement>
</funding-group>
<counts>
<fig-count count="1"/>
<table-count count="0"/>
<equation-count count="0"/>
<ref-count count="20"/>
<page-count count="8"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Molecular and Cellular Pathology</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
<notes notes-type="frontiers-research-topic">
<p>Editorial on the Research Topic <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/research-topics/65758">Artificial intelligence applications in chronic ocular diseases, volume II</ext-link>
</p>
</notes>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Artificial intelligence (AI) has rapidly reshaped the landscape of ophthalmic research and clinical practice, offering unprecedented capabilities for disease detection, quantitative phenotyping, risk prediction and decision support across a broad spectrum of chronic ocular conditions (<xref ref-type="bibr" rid="B18">Xu and Yang, 2023</xref>; <xref ref-type="bibr" rid="B14">Wang et al., 2024</xref>). In recent years, advances in deep learning, multimodal fusion and large-scale representation learning have driven substantial progress in the diagnosis and screening of retinal diseases such as diabetic retinopathy (DR) (<xref ref-type="bibr" rid="B7">Li et al., 2023</xref>; <xref ref-type="bibr" rid="B12">Ming and Lei, 2019</xref>), macular disorders (<xref ref-type="bibr" rid="B2">Feng et al., 2023</xref>) and high myopia&#x2013;related pathology (<xref ref-type="bibr" rid="B15">Wu et al., 2022</xref>), as well as in glaucoma (<xref ref-type="bibr" rid="B7">Li et al., 2023</xref>; <xref ref-type="bibr" rid="B1">Cong et al., 2024</xref>; <xref ref-type="bibr" rid="B13">Qian et al., 2024</xref>), cataract, anterior segment abnormalities (<xref ref-type="bibr" rid="B6">Jiang et al., 2023</xref>), ocular surface disease (<xref ref-type="bibr" rid="B5">Ji et al., 2022</xref>; <xref ref-type="bibr" rid="B11">Mimazhuoma and Ji, 2023</xref>) and orbital disorders.</p>
<p>Parallel to these developments, the eye has increasingly been recognized as a sensitive biomarker source for systemic chronic diseases&#x2014;including cardiovascular disease, hypertension, diabetes and peripheral neuropathy&#x2014;supported by growing evidence that ocular microvascular, neurostructural and biomechanical features reflect systemic pathophysiology (<xref ref-type="bibr" rid="B4">Hui et al., 2024</xref>; <xref ref-type="bibr" rid="B9">Liu Z. et al., 2025</xref>). Together, these developments underscore the dual role of ophthalmic AI as both a disease-focused diagnostic tool and a window into whole-body health.</p>
<p>At the same time, methodological advances in quantitative image analysis (<xref ref-type="bibr" rid="B20">Zhou et al., 2023</xref>), 3D segmentation (<xref ref-type="bibr" rid="B16">Wu et al., 2024</xref>; <xref ref-type="bibr" rid="B9">Liu Z. et al., 2025</xref>), cross-device standardization, quality assurance and multimodal intelligent systems have transformed AI from a standalone algorithmic tool into a foundational analytical infrastructure (<xref ref-type="bibr" rid="B17">Wu et al., 2025</xref>) capable of integrating imaging, clinical data and text-based information. AI is now increasingly embedded within clinical workflows (<xref ref-type="bibr" rid="B8">Li et al., 2024</xref>), supporting perioperative decision-making for glaucoma, refractive and cataract surgeries, predicting anatomical and functional treatment responses, and enabling long-term monitoring in chronic disease management (<xref ref-type="bibr" rid="B19">Yang et al., 2024</xref>).</p>
<p>The integration of multi-omics modalities&#x2014;such as aqueous humor proteomics, transcriptomics and functional neuroimaging&#x2014;further extends AI applications from phenotype recognition to mechanistic discovery, offering insights into aging, neurovascular coupling and immunometabolic remodeling in chronic ocular conditions. Meanwhile, systematic reviews and population-level studies emphasize the potential of AI to enhance public health strategies, including large-scale screening programs, disease surveillance, teleophthalmology and medical education (<xref ref-type="bibr" rid="B3">Gong et al., 2024</xref>).</p>
<p>Despite these promising advances, significant challenges remain in the areas of generalizability, robustness to real-world variability (<xref ref-type="bibr" rid="B10">Liu M. et al., 2025</xref>), interpretability, multimodal data integration, regulatory readiness and ethical governance (<xref ref-type="bibr" rid="B8">Li et al., 2024</xref>; <xref ref-type="bibr" rid="B3">Gong et al., 2024</xref>). These limitations highlight the need for foundation models (<xref ref-type="bibr" rid="B17">Wu et al., 2025</xref>), cross-organ risk assessment platforms, causality-aware and transparent algorithms, and standardized evaluation frameworks that can support safe and equitable deployment across diverse populations and healthcare environments.</p>
<p>We launched a new round of this research initiative to systematically explore the latest advancements in artificial intelligence applications for chronic ocular diseases. This year, we received 82 submissions, and after rigorous peer review and quality assessment, 57 representative papers were accepted for publication. To date, these articles have accumulated over 28,000 downloads, with a combined 129,000 views and downloads, reflecting the sustained interest in and significant scientific impact of AI-driven research in the field of chronic eye diseases.</p>
<p>This editorial provides a comprehensive synthesis of these AI applications in chronic ocular diseases across six major domains&#x2014;disease diagnosis and screening, ocular biomarkers of systemic disease, quantitative analytics and workflow integration, AI-assisted surgery and treatment planning, public-health and review perspectives, and future research directions. The conceptual relationships among these domains are summarized in <xref ref-type="fig" rid="F1">Figure 1</xref>, illustrating how disease-oriented and biomarker-driven research converges into shared analytical infrastructures and expands toward surgical, public-health and unified ophthalmic AI ecosystems.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Logical relationships among major AI research directions in chronic ocular diseases. Disease&#x2010;focused and biomarker&#x2010;focused studies (Ch. 2&#x2013;3) feed into a shared infrastructure of quantitative analysis and workflow integration (Ch. 4), which branches into surgical/precision medicine and public health applications (Ch. 5&#x2013;6) and jointly informs future unified and clinically deployable AI ecosystems (Ch. 7).</p>
</caption>
<graphic xlink:href="fcell-14-1762437-g001.tif">
<alt-text content-type="machine-generated">Flowchart illustrating six AI research areas in chronic ocular diseases: diagnosis and screening, ocular biomarkers, quantitative analysis, surgery and multi-omics, review and public health, and future directions, each with related subtopics and study references.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s2">
<label>2</label>
<title>AI for diagnosis and screening of chronic ocular diseases</title>
<sec id="s2-1">
<label>2.1</label>
<title>Retinal and diabetic eye diseases</title>
<p>In the field of diabetic retinopathy (DR) and related retinal diseases, multiple studies have systematically demonstrated the potential of AI across the entire pipeline from lesion detection through microcirculation assessment to risk stratification.</p>
<p>At the image level, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1608580">Hu et al.</ext-link> introduced a lightweight attention mechanism based on ultra-widefield (UWF) fundus images, achieving substantial improvements in DR lesion detection accuracy while maintaining low computational complexity. This work highlights the engineering advantage of &#x201c;high performance &#x2b; low computational cost&#x201d; in resource-constrained environments. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1484880">Zuo et al.</ext-link> proposed a multi-scale visual Mamba (MRVM) model on optical coherence tomography (OCT), achieving classification accuracies of approximately 98.98% and 96.21% on two public retinal datasets, demonstrating the clear advantages of next-generation sequence modeling architectures in interpreting volumetric retinal data. For color fundus photography, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1608988">Zhang et al.</ext-link> fused deep features extracted by convolutional neural networks (CNNs) with radiomics features, and improved DR classification performance using a combination of label smoothing and graph-constrained collaborative learning, emphasizing the role of &#x201c;multi-feature fusion &#x2b; regularization&#x201d; in suppressing overfitting and enhancing inter-class separability.</p>
<p>In the OCT angiography (OCTA) domain, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1609928">Wu et al.</ext-link> used swept-source OCTA (SS-OCTA) and found that the choroidal vascularity index (CVI) in highly myopic patients with diabetes was significantly lower than in patients with DR alone, suggesting that CVI, as a quantitative AI-derived biomarker, may be useful for early risk stratification in high-risk populations. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1501625">Li et al.</ext-link> examined peripapillary atrophy (PPA) subregions with SS-OCTA and found that &#x3b3;-zone PPA was associated with a reduced risk of DR. They proposed that &#x201c;myopia-related posterior pole thinning/microvascular depletion&#x201d; may exert a structural &#x201c;protective effect&#x201d; against DR, providing new imaging evidence for the interaction between myopia and DR.</p>
<p>For multiple retinal diseases, the WARN model proposed by <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fradi.2025.1608052">Guo et al.</ext-link> achieved strong performance in classifying seven common retinal conditions, while <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1608325">Qi et al.</ext-link> reported similarly robust discriminative performance, further underscoring the practical value of AI in multi-disease retinal diagnosis. In terms of multimodal fusion, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1665173">Gu et al.</ext-link> developed the Fusion-MIL model that jointly models color fundus photographs and OCT representations, demonstrating superior diagnostic performance over any single-modality model and strong cross-device generalizability and fine-grained grading capacity&#x2014;features particularly valuable for long-term follow-up in multicenter, multi-device environments. On UWF images, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1703606">Duan et al.</ext-link> built a model capable of automatic recognition and classification of multiple ocular diseases, providing a technical foundation for optimizing outpatient workflows and automated pre-screening. For fluorescein fundus angiography (FFA), <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1703606">Duan et al.</ext-link> further showed that knowledge-enhanced pretraining could markedly improve diagnostic accuracy and generalization, demonstrating that domain knowledge injection is particularly beneficial for complex multi-frame imaging.</p>
<p>Overall, this body of work indicates that AI in retinal and DR applications has evolved from single-disease, single-modality classifiers toward multi-disease, multimodal and multi-scenario &#x201c;system-level risk assessors&#x201d;.</p>
</sec>
<sec id="s2-2">
<label>2.2</label>
<title>Glaucoma, cataract and anterior segment disorders</title>
<p>In glaucoma, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1583330">Huang et al.</ext-link> constructed a random forest model guided by Boruta feature selection using demographic characteristics, metabolic indicators and biochemical parameters to predict the risk of neovascular glaucoma (NVG) in patients with proliferative diabetic retinopathy (PDR). This study highlights the value of cross-system modeling that integrates &#x201c;internal medicine metabolic indicators &#x2b; ophthalmic imaging/clinical data&#x201d;, and shows that traditional machine learning models remain highly interpretable and practical in scenarios with limited sample sizes.</p>
<p>In cataract, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1669696">Tang et al.</ext-link> built an AI model using slit-lamp retroillumination images to achieve automatic diagnosis and grading, establishing a novel quantitative assessment system that can significantly improve efficiency and consistency in primary care and large-scale screening.</p>
<p>Anterior segment and ocular surface diseases are also key components of chronic ocular disease management. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1612303">Wang et al.</ext-link> analyzed meibomian gland energy curves derived from upper eyelid infrared imaging to quantify structural changes associated with Demodex infestation, and proposed that the derived parameters can serve as early, non-invasive biomarkers for dry eye and chronic ocular surface inflammation, thus providing a new &#x201c;structure&#x2013;function&#x201d; quantitative pathway.</p>
</sec>
<sec id="s2-3">
<label>2.3</label>
<title>Orbital and systemic-related ocular conditions</title>
<p>For orbital diseases, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1609231">Han et al.</ext-link> developed a deep learning model for diagnosing thyroid-associated ophthalmopathy (TED) based on facial photographs and clinical records. The model achieved high accuracy for key phenotypes such as inflammation associated with clinical activity score (CAS), eyelid retraction and motility restriction, and emphasized that visual explanations are critical for supporting individualized treatment planning. In traumatic orbital fractures, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1459040">Li et al.</ext-link> built an AI-assisted diagnostic and treatment decision system using CT images, which improved the accuracy of detecting trapdoor fractures and reduced surgical complications, thereby laying the groundwork for future intelligent surgical robots and intraoperative navigation systems.</p>
<p>In summary, this chapter provides a panoramic view of AI in the diagnosis and screening of chronic ocular diseases, illustrating a cross-disease, multimodal technological progression from retinal diseases to glaucoma, cataract, ocular surface and orbital disorders.</p>
</sec>
</sec>
<sec id="s3">
<label>3</label>
<title>AI-derived ocular biomarkers and systemic chronic diseases</title>
<p>The eye is often described as a &#x201c;window to systemic diseases&#x201d;. Numerous studies have focused on extracting ocular imaging biomarkers related to cardiovascular and metabolic conditions.</p>
<sec id="s3-1">
<label>3.1</label>
<title>Cardiovascular and metabolic diseases</title>
<p>In coronary artery disease (CAD), <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1654159">Zhou et al.</ext-link> used OCTA to derive quantitative retinal microvascular features and developed a CAD-assisted diagnostic model, demonstrating that retinal OCTA biomarkers can serve as standardized tools for early CAD screening and support cardiology decision-making. Regarding hypertensive eye disease, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1459040">Li et al.</ext-link> conducted a systematic review summarizing deep learning models based on fundus photographs for early hypertension screening and risk stratification, while highlighting key limitations in model generalizability, robustness to low-quality images and interpretability. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1581785">Ding et al.</ext-link> further leveraged deep learning to extract features from OCTA, more precisely characterizing hypertension-related changes in retinal vascular morphology and perfusion, and showing that AI has considerable potential as a high-throughput screening tool for hypertension and its complications.</p>
</sec>
<sec id="s3-2">
<label>3.2</label>
<title>Diabetic peripheral neuropathy</title>
<p>In diabetic peripheral neuropathy (DPN), <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1608494">Chen et al.</ext-link> used corneal confocal microscopy (CCM) images to compare transformer-based and CNN-based networks for DPN binary classification, and found that a transformer-based DLA exhibited higher potential for rapid screening. Such work brings &#x201c;ocular micro-neural structures&#x201d; into AI pipelines for systemic diabetes management and represents an important component in constructing lifelong disease trajectories.</p>
</sec>
<sec id="s3-3">
<label>3.3</label>
<title>Cross-organ interactions and microvascular aging</title>
<p>In terms of retinal vasculature and optic nerve head morphology, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1693739">Bai et al.</ext-link> compared populations with high <italic>versus</italic> low cerebro-cardiovascular risk and found significant differences in vascular complexity and cup-to-disc area ratio, providing ocular imaging evidence for cardiovascular risk stratification. In a cohort of patients with type 2 diabetes mellitus (T2DM), <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1550176">Chen et al.</ext-link> reported that specific retinal vascular parameters&#x2014;such as mean branch segment length and vessel density&#x2014;were significantly associated with mild-to-moderate non-proliferative DR (NPDR), suggesting that these features may serve as preclinical biomarkers of DR-related microvascular abnormalities.</p>
<p>From the perspective of hemodynamics and vessel wall structure, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1467374">Jiang et al.</ext-link> used spectral-domain OCT (SD-OCT) and the full-width at half maximum (FWHM) method to noninvasively measure a series of parameters (including RALD, RAOD, RVLD, RVOD, AWT, VWT and AVR), and demonstrated that they were significantly associated with internal carotid artery stenosis. In another study, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1467374">Jiang et al.</ext-link> applied OCTA and PKSEA-Net to analyze retinal microvascular morphology in individuals with gestational diabetes mellitus (GDM), suggesting that AI tools could enable early detection of microvascular alterations in high-risk pregnant populations.</p>
<p>In addition, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1518154">Wei et al.</ext-link> used intelligent quantitative algorithms and found that allergic conjunctivitis significantly altered meibomian gland length and central morphology, providing quantifiable structural indicators for chronic ocular surface inflammation. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1619956">Hao et al.</ext-link> combined pattern visual evoked potentials (PVEP) with machine learning to predict best-corrected visual acuity in patients with ocular trauma, establishing a pathway for coupling imaging and functional evaluation.</p>
<p>Taken together, this chapter extends the focus from local ocular phenotypes to &#x201c;eye&#x2013;heart&#x2013;brain&#x2013;metabolic&#x201d; interactions, markedly enhancing the interdisciplinary depth of AI research in chronic ocular diseases.</p>
</sec>
</sec>
<sec id="s4">
<label>4</label>
<title>Quantitative analysis, segmentation and clinical workflow integration</title>
<sec id="s4-1">
<label>4.1</label>
<title>Structural segmentation and morphological quantification</title>
<p>In structural segmentation, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1604832">Huang et al.</ext-link> designed a large-kernel multi-scale attention module for choroidal vessel segmentation on OCT, effectively addressing challenges such as low contrast and fuzzy boundaries. Based on the segmentation results, they performed three-dimensional reconstruction and morphometric analysis, revealing significant structural differences in the choroid between highly myopic and healthy eyes. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1532228">Xie et al.</ext-link> proposed M3B-Net for retinal vessel segmentation on combined UWF and FFA images, introducing a selective fusion module (SFM), a local perception fusion module (LPFM) and an attention-guided upsampling module (AUM). The model significantly improved segmentation of fine vessels in ultra-widefield high-resolution images. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1477819">Liu et al.</ext-link> validated DS2TUNet on multiple public datasets (DRIVE, CHASE_DB1, ROSE-1) and a clinical dataset of central serous chorioretinopathy (CSC), showing that the model achieved state-of-the-art or superior performance across multiple metrics and exhibited strong cross-dataset transferability.</p>
<p>These studies indicate that AI-driven fine-grained segmentation and 3D reconstruction have become essential technical supports for quantifying structural changes in chronic ocular diseases.</p>
</sec>
<sec id="s4-2">
<label>4.2</label>
<title>Clinical workflow integration and intelligent systems</title>
<p>For image quality control, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1501625">Li et al.</ext-link> developed the DeepMonitoring system using corneal images acquired via smartphones. The system can both determine whether an image is of low quality and localize the source of the quality issue, guiding operators to retake images when necessary and thus ensuring input quality for mobile ophthalmic AI systems. In real-world applications for chronic ocular diseases, such QA/QC is a prerequisite for stable model deployment.</p>
<p>In another study, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1501625">Li et al.</ext-link> proposed the MOSAIC system, which integrates ocular surface images and textual information and demonstrates strong few-shot learning capability: even with limited training data, it can accurately manage ocular surface diseases. Its potential for deployment on mobile devices is particularly well aligned with the needs of primary care and resource-limited settings.</p>
<p>In multimodal question answering and large model applications, Xue et al. explored the use of ChatGPT for triaging ocular trauma in emergency settings, showing that it may improve preliminary assessment and triage efficiency but still exhibits clear limitations in understanding clinical images. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1574378">Yang et al.</ext-link> showed that customized large language models (LLMs) can become effective educational tools for medical students and ophthalmologists. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fmed.2025.1534294">Zeng et al.</ext-link> incorporated ChatGPT into traditional teaching to enhance understanding of rare diseases such as retinitis pigmentosa (RP), demonstrating that AI as a knowledge-augmentation tool can complement conventional education and foreshadowing &#x201c;hybrid AI-augmented teaching&#x201d; paradigms.</p>
<p>With respect to system-level AI solutions, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1642539">Zhuang et al.</ext-link> proposed a multimodal agent that illustrates both the feasibility and necessity of constructing specialized AI solutions for complex clinical scenarios.</p>
<p>Overall, this chapter illustrates how AI technologies have evolved from &#x201c;research-grade models&#x201d; to &#x201c;systems that can be embedded into clinical workflows&#x201d;, forming an end-to-end path from segmentation and parameter extraction to standardization, quality control and AI agents.</p>
</sec>
</sec>
<sec id="s5">
<label>5</label>
<title>AI-assisted surgical decision-making, treatment planning and multi-omics insights</title>
<sec id="s5-1">
<label>5.1</label>
<title>Surgical planning and perioperative risk prediction</title>
<p>Research on surgery and treatment demonstrates the value of AI throughout the perioperative cycle, from preoperative decision support through intraoperative safety management to postoperative prognostication.</p>
<p>In glaucoma surgery, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1654719">Liu et al.</ext-link> showed that anatomical features of patients with primary angle-closure glaucoma (PACG) are major determinants of refractive instability after phacoemulsification with intraocular lens implantation (PE &#x2b; IOL), based on preoperative refraction and ocular biometry. They recommended that axial length (AL), lens thickness (LT), white-to-white corneal diameter (WTW) and the AL/corneal radius (AL/CR) ratio should be jointly considered when selecting IOL power. In a systematic review, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fdata.2025.1605018">Kailani et al.</ext-link> concluded that existing AI models for glaucoma surgery prediction generally outperform traditional statistical methods, but are limited by lack of external validation and heterogeneous success criteria. They stressed the need for multicenter prospective studies and open datasets to improve model reliability.</p>
<p>In refractive and cataract surgery, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1477819">Liu et al.</ext-link> demonstrated that ocular rotational magnitude in SMILE procedures is significantly associated with residual postoperative astigmatism, providing quantitative evidence to inform preoperative planning and rotational compensation strategies. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1613634">Su et al.</ext-link> constructed a surgical decision model for highly myopic cataract based on slit-lamp images, OCT and biometry, achieving good performance in complex decision-making scenarios. Regarding intraoperative complications, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1487482">Zhu et al.</ext-link> used preoperative panoramic corneal images and surgical videos to build models that predict opaque bubble layer (OBL) patterns and the risk of OBL formation during SMILE procedures. Both deep residual networks and GAN-based models proved effective, supporting preoperative risk stratification and intraoperative warning.</p>
</sec>
<sec id="s5-2">
<label>5.2</label>
<title>Treatment response prediction and longitudinal follow-up</title>
<p>For pharmacological treatment and prognostication, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1609567">Feng et al.</ext-link> used OCT images to generate post&#x2013;anti-VEGF treatment response images, demonstrating the potential of generative models in predicting treatment outcomes. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1612303">Wang et al.</ext-link> employed SS-OCTA to evaluate retinal and choroidal hemodynamic changes in patients with atrial fibrillation after radiofrequency catheter ablation (RFCA). Changes in choroidal thickness and the choroidal vascularity index (CVI) were found to possibly reflect compensatory redistribution of cardiac output, suggesting that ocular microcirculation may serve as a sensitive biomarker for monitoring anti-arrhythmic therapy. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1603958">Lu et al.</ext-link> combined OCT structural features with blood metabolic and hematologic indicators to build a model predicting anatomical response to anti-VEGF therapy in diabetic macular edema (DME), thereby enabling individualized treatment optimization.</p>
<p>In long-term monitoring of orthokeratology (ortho-K), <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1517240">Wu et al.</ext-link> analyzed blinking behavior in children using a deep learning system based on U-Net and Swin Transformer. They found that increased frequency of incomplete blinks was closely associated with reduced tear film stability, suggesting that blinking patterns can be an important monitoring indicator for long-term ortho-K wear.</p>
<p>Collectively, these studies demonstrate the system-level value of AI in &#x201c;perioperative management&#x201d; of chronic ocular diseases, forming a closed loop from precise preoperative assessment and intraoperative risk control to postoperative outcome prediction and functional monitoring.</p>
</sec>
<sec id="s5-3">
<label>5.3</label>
<title>Multi-omics-driven mechanistic studies</title>
<p>Multi-omics research provides a new dimension for understanding mechanisms and targeted interventions in chronic ocular diseases.</p>
<p>In aqueous humor proteomics, <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/journals/cell-and-developmental-biology/articles/10.3389/fcell.2025.1583330/full">Huang et al.</ext-link> analyzed age-related changes in the aqueous humor proteome, identifying 179 significantly age-associated proteins and further selecting 11 characteristic proteins for aqueous age prediction and 22 potential regulators. Their findings highlighted oxidative damage, matrix dysregulation and protein homeostasis imbalance as hallmark signatures of ocular aging. Regarding central nervous system involvement in DR, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1604832">Huang et al.</ext-link> were the first to integrate functional MRI (fMRI) with transcriptomics to elucidate the genetic determinants of disrupted interhemispheric connectivity in DR, pointing to the complex interplay of neurovascular, metabolic and neurodegenerative pathways as key contributors to DR-related cognitive and visual dysfunction.</p>
<p>In thyroid-associated ophthalmopathy, <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/journals/cell-and-developmental-biology/articles/10.3389/fcell.2024.1486170/full">Shu et al.</ext-link> analyzed gene expression profiles of thyroid eye disease (TED)-related lacrimal gland hypertrophy and orbital fat expansion, revealing significant pathophysiological differences between these two manifestations and building a TED diagnostic prediction model based on KIAA0319 and PRDX4. This work provides both omics-level evidence and a modeling prototype for noninvasive, prospective TED diagnosis.</p>
<p>In summary, this chapter illustrates the expansion of AI from a &#x201c;clinical imaging tool&#x201d; to an integrated role in &#x201c;surgical medicine, treatment response and mechanistic discovery&#x201d;, serving as an important conceptual upgrade in the context of chronic ocular disease research.</p>
</sec>
</sec>
<sec id="s6">
<label>6</label>
<title>Review and public health perspectives</title>
<p>Several systematic reviews and methodological studies provide a high-level perspective that underpins this narrative review.</p>
<p>For DR, the systematic review and meta-analysis by <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fmed.2025.1519768">Tahir et al.</ext-link> showed that AI-assisted screening achieves higher sensitivity than human graders while maintaining comparable specificity, supporting the use of AI as a reliable alternative or adjunct for DR screening. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1473176">Xu et al.</ext-link> systematically reviewed AI models for DR diagnosis and treatment from a methodological standpoint, discussing issues of bias, transparency and ethics, and outlining prospects for clinical translation.</p>
<p>For pathologic myopia and cataract management, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fmed.2025.1572750">He et al.</ext-link> and <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1611216">Lu et al.</ext-link> summarized the progress of AI in screening, grading and prognostic assessment, emphasizing the importance of AI for long-term management of progressive chronic eye diseases. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1609028">Li et al.</ext-link> reviewed AI models based on fundus imaging for hypertensive eye disease, discussing their current status and limitations.</p>
<p>With respect to LLMs and multimodal medical foundation models, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1513971">Zhang et al.</ext-link> outlined their potential roles in screening, decision-making and individualized treatment, and identified three major barriers to clinical adoption: difficulties in acquiring multimodal data, limited interpretability and the lack of standardized validation frameworks. From a public health perspective, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1576465">Chen et al.</ext-link> systematically analyzed the value of AI in large-scale screening, disease surveillance, telemedicine and continuing education, highlighting the &#x201c;amplifier&#x201d; role of AI in strengthening ophthalmic public health capacity.</p>
<p>These review-oriented contributions furnish a robust evidence base for the overarching framework of &#x201c;Artificial Intelligence Applications in Chronic Ocular Diseases&#x201d; and empirically support the research directions summarized in this article.</p>
</sec>
<sec id="s7">
<label>7</label>
<title>Future directions: toward unified and clinically deployable AI for chronic ocular diseases</title>
<p>Based on the evidence above, future directions for AI in chronic ocular diseases can be summarized as follows.</p>
<sec id="s7-1">
<label>7.1</label>
<title>From single-disease models to cross-disease, cross-organ unified risk assessment platforms</title>
<p>Existing work has extended beyond DR to encompass glaucoma, pathologic myopia, cataract, TED, dry eye and ocular surface disorders, while also identifying ocular biomarkers for systemic diseases such as CAD, hypertension and DPN (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1654159">Zhou et al.</ext-link>, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1600202">Li et al.</ext-link>, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1581785">Ding et al.</ext-link>, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1576465">Chen et al.</ext-link>). It will be necessary to establish joint risk scoring systems covering the &#x201c;eye&#x2013;brain&#x2013;heart&#x2013;metabolic&#x201d; axis and to realize multi-disease risk co-assessment and long-term follow-up within a unified feature space integrating phenotypes, omics and imaging.</p>
</sec>
<sec id="s7-2">
<label>7.2</label>
<title>Multimodal and multi-scale fusion: integrated modeling of imaging, text, omics and longitudinal data</title>
<p>From Fusion-MIL (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1665173">Gu et al.</ext-link>) and knowledge-enhanced pretraining (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1630667">Duan et al.</ext-link>) to the joint analysis of aqueous humor proteomics and fMRI&#x2013;transcriptomics (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1583330">Huang et al.</ext-link>, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1604832">Huang et al.</ext-link>), it is evident that future models should not be confined to a single modality. The development of multimodal models capable of simultaneously handling 2D/3D imaging, clinical text, omics data and longitudinal follow-up will be key to achieving a &#x201c;vertical&#x2013;horizontal integrated&#x201d; panoramic view of chronic ocular disease.</p>
</sec>
<sec id="s7-3">
<label>7.3</label>
<title>Foundation models and specialty-specific multimodal large models</title>
<p>Current research has begun to explore GPT-like large language models (LLMs) in medical education (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fmed.2025.1534294">Zeng et al.</ext-link>), clinical question answering (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1564054">Xue et al.</ext-link>, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1574378">Yang et al.</ext-link>) and multimodal decision support (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1513971">Zhang et al.</ext-link>, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1642539">Zhuang et al.</ext-link>, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1501625">Li et al.</ext-link>). In the future, ophthalmology-focused multimodal foundation models could be developed using large-scale, unlabeled ophthalmic images, electronic medical records and omics data for self-supervised pretraining, followed by adaptation to multiple downstream tasks with limited labeled data. This would enable a new paradigm in which &#x201c;one foundation model supports multiple tasks and scenarios&#x201d;.</p>
</sec>
<sec id="s7-4">
<label>7.4</label>
<title>From black box to interpretability and controllability: trustworthy AI and causal inference</title>
<p>Many studies have underscored the lack of interpretability as a key obstacle to clinical deployment of AI (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1608994">Li et al.</ext-link>, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1608988">Zhang et al.</ext-link>, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fdata.2025.1605018">Kailani et al.</ext-link>). Future work needs to incorporate attention visualization, inherently interpretable architectures, causal inference methods and uncertainty quantification into AI models for chronic ocular diseases, thereby enhancing transparency of the decision process and providing clinicians with traceable chains of evidence (for example, the specific vessel segments or choroidal layers contributing to a decision). This, in turn, will help generate new hypotheses about disease mechanisms.</p>
</sec>
<sec id="s7-5">
<label>7.5</label>
<title>Robustness and domain generalization in real-world settings</title>
<p>Real-world management of chronic ocular diseases is characterized by multiple centers, devices, ethnicities and coexisting diseases. The OCT platform conversion study by <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1612455">Tian et al.</ext-link>, the image quality monitoring work by <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1608994">Li et al.</ext-link>, and cross-dataset validation of segmentation models (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1477819">Liu et al.</ext-link>, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1532228">Xie et al.</ext-link>) provide practical examples for improving robustness. Future research should systematically address domain generalization and domain adaptation, and build continually learning systems capable of online updating to cope with device upgrades, changes in imaging protocols and population heterogeneity.</p>
</sec>
<sec id="s7-6">
<label>7.6</label>
<title>From tools to decision-making loops: workflow-embedded AI solutions</title>
<p>From system-level tools such as MOSAIC and Fusion-MIL (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1608994">Li et al.</ext-link>, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1665173">Gu et al.</ext-link>) to glaucoma and cataract surgical decision models (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1477819">Liu et al.</ext-link>, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1613634">Su et al.</ext-link>, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fdata.2025.1605018">Kailani et al.</ext-link>), AI is moving from a peripheral auxiliary role into central positions within clinical workflows. Future efforts should emphasize deep integration with hospital information systems (HIS), electronic medical records (EMR), surgical navigation and telemedicine platforms, forming a complete decision-making loop that spans &#x201c;screening&#x2013;diagnosis&#x2013;surgery&#x2013;follow-up&#x2013;public health&#x201d;.</p>
</sec>
<sec id="s7-7">
<label>7.7</label>
<title>Ethics, regulation and standardized evaluation frameworks</title>
<p>Review articles have consistently highlighted ethical, privacy and standardization gaps (<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1473176">Xu et al.</ext-link>, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2024.1513971">Zhang et al.</ext-link>, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fcell.2025.1576465">Chen et al.</ext-link>, <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fdata.2025.1605018">Kailani et al.</ext-link>). Because chronic ocular disease AI involves long-term follow-up and allocation of healthcare resources, it is especially prone to issues of fairness and accessibility. Future work should promote the development of data standards, model registration systems and tiered validation frameworks at international and national levels to ensure safety, fairness and sustainability of AI systems across regions and populations.</p>
</sec>
</sec>
<sec sec-type="conclusion" id="s8">
<label>8</label>
<title>Conclusion</title>
<p>Artificial intelligence is rapidly transforming the landscape of chronic ocular disease research and clinical practice. Across diagnostic modeling, ocular biomarker discovery, quantitative analytics, surgical decision-making, public health applications and mechanistic multi-omics studies, the 57 accepted papers in this research topic collectively demonstrate the breadth and maturity of AI-driven innovation in ophthalmology. The evidence synthesized in this editorial illustrates how AI has evolved from single-modality diagnostic tools into integrated, multimodal ecosystems capable of supporting risk stratification, longitudinal monitoring, precision surgical planning and interdisciplinary insights linking ocular and systemic health. Despite these advances, major challenges persist&#x2014;including limited generalizability across devices and populations, insufficient interpretability, heterogeneous validation standards and the absence of unified regulatory frameworks. Addressing these gaps will require the development of robust, transparent and clinically grounded AI systems, as well as large-scale multimodal datasets, specialty-specific foundation models and workflow-embedded decision-support pipelines. Looking ahead, the convergence of imaging, clinical data, omics and large language models promises to reshape the management of chronic ocular diseases, paving the way toward unified, equitable and clinically deployable AI platforms that can support lifelong ocular health.</p>
</sec>
</body>
<back>
<sec sec-type="author-contributions" id="s9">
<title>Author contributions</title>
<p>WY: Writing &#x2013; review and editing. HF: Writing &#x2013; original draft. YX: Writing &#x2013; review and editing. WC: Writing &#x2013; review and editing.</p>
</sec>
<ack>
<title>Acknowledgements</title>
<p>We are grateful for the contribution of every participant in this Research Topic.</p>
</ack>
<sec sec-type="COI-statement" id="s11">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
<p>The authors WY, HF, YX and WC declared that they were editorial board members of Frontiers at the time of submission. This had no impact on the peer review process and the final decision.</p>
</sec>
<sec sec-type="ai-statement" id="s12">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was used in the creation of this manuscript. The author(s) used ChatGPT to polish the language. The author(s) verify and take full responsibility for the use of generative AI in the preparation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s13">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cong</surname>
<given-names>Y. Y.</given-names>
</name>
<name>
<surname>Jiang</surname>
<given-names>W. Y.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Automatic assessment of anterior ciliary position in primary angle-closure glaucoma based on deep learning</article-title>. <source>Chin. J. Exp. Ophthalmol.</source> <volume>42</volume> (<issue>12</issue>), <fpage>1134</fpage>&#x2013;<lpage>1141</lpage>. <pub-id pub-id-type="doi">10.3760/cma.j.cn115989-20240328-00085</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Feng</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Lou</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>A bibliometric analysis of artificial intelligence applications in macular edema: exploring research hotspots and frontiers</article-title>. <source>Front. Cell Dev. Biol.</source> <volume>11</volume>, <fpage>1174936</fpage>. <pub-id pub-id-type="doi">10.3389/fcell.2023.1174936</pub-id>
<pub-id pub-id-type="pmid">37255600</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gong</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>W. T.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>X. M.</given-names>
</name>
<name>
<surname>Wan</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>Y. J.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>S. J.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>Development and research status of intelligent ophthalmology in China</article-title>. <source>Int. J. Ophthalmol.</source> <volume>17</volume> (<issue>12</issue>), <fpage>2308</fpage>&#x2013;<lpage>2315</lpage>. <pub-id pub-id-type="doi">10.18240/ijo.2024.12.20</pub-id>
<pub-id pub-id-type="pmid">39697896</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hui</surname>
<given-names>M. Y.</given-names>
</name>
<name>
<surname>Shi</surname>
<given-names>J. L.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>X. H.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Analysis of fundus vascular image features of healthy people based on deep learning technology</article-title>. <source>Int. J. Ophthalmol.</source> <volume>24</volume> (<issue>10</issue>), <fpage>1542</fpage>&#x2013;<lpage>1550</lpage>. <pub-id pub-id-type="doi">10.3980/j.issn.1672-5123.2024.10.05</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ji</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Hong</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Lu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>K.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Advances in artificial intelligence applications for ocular surface diseases diagnosis</article-title>. <source>Front. Cell Dev. Biol.</source> <volume>10</volume>, <fpage>1107689</fpage>. <pub-id pub-id-type="doi">10.3389/fcell.2022.1107689</pub-id>
<pub-id pub-id-type="pmid">36605721</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jiang</surname>
<given-names>W. Y.</given-names>
</name>
<name>
<surname>Yan</surname>
<given-names>Y. L.</given-names>
</name>
<name>
<surname>Cheng</surname>
<given-names>S. M.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>An automatic evaluation system for anterior chamber angle ultrasound biomicroscopy images based on deep learning algorithm</article-title>. <source>Int. J. Ophthalmol.</source> <volume>23</volume> (<issue>5</issue>), <fpage>833</fpage>&#x2013;<lpage>842</lpage>. <pub-id pub-id-type="doi">10.3980/j.issn.1672-5123.2023.5.23</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Guo</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Chang</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Using deep learning models to detect ophthalmic diseases: a comparative study</article-title>. <source>Front. Med.</source> <volume>10</volume>, <fpage>1115032</fpage>. <pub-id pub-id-type="doi">10.3389/fmed.2023.1115032</pub-id>
<pub-id pub-id-type="pmid">36936225</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>H. S.</given-names>
</name>
<name>
<surname>Lv</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Opportunities and challenges of digital ophthalmology diagnosis and treatment model</article-title>. <source>Digital Med. Health</source> <volume>2</volume> (<issue>4</issue>), <fpage>213</fpage>&#x2013;<lpage>218</lpage>. <pub-id pub-id-type="doi">10.3760/cma.j.cn101909-20240512-00103</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Fang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2025a</year>). <source>Dataset, baseline and evaluation design for GAVE Challenge[C]//International workshop on ophthalmic medical image analysis</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer Nature Switzerland</publisher-name>, <fpage>158</fpage>&#x2013;<lpage>167</lpage>.</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Guo</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2025b</year>). &#x201c;<article-title>Leveraging diffusion models for continual test-time adaptation in fundus image Classification</article-title>,&#x201d; in <source>International conference on medical image computing and computer-assisted intervention</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer Nature Switzerland</publisher-name>, <fpage>337</fpage>&#x2013;<lpage>347</lpage>.</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mimazhuoma</surname>
<given-names>C. Y. P.</given-names>
</name>
<name>
<surname>Ji</surname>
<given-names>Y. K.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Research progress and thinking on artificial intelligence-assisted diagnosis of pterygium based on anterior segment photography</article-title>. <source>Digital Med. Health</source> <volume>1</volume> (<issue>2</issue>), <fpage>115</fpage>&#x2013;<lpage>120</lpage>. <pub-id pub-id-type="doi">10.3760/cma.j.cn101909-20230707-00013</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ming</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Lei</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Research status and prospect of artificial intelligence-assisted diagnosis of diabetic retinopathy based on deep learning</article-title>. <source>Chin. J. Exp. Ophthalmol.</source> <volume>37</volume> (<issue>8</issue>), <fpage>684</fpage>&#x2013;<lpage>688</lpage>. <pub-id pub-id-type="doi">10.3760/cma.j.issn.2095-0160.2019.08.019</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Qian</surname>
<given-names>C. X.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>L. X.</given-names>
</name>
<name>
<surname>Feng</surname>
<given-names>X. L.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Application of deep learning based on multi-modal data in glaucoma diagnosis and severity grading</article-title>. <source>Chin. J. Exp. Ophthalmol.</source> <volume>42</volume> (<issue>12</issue>), <fpage>1149</fpage>&#x2013;<lpage>1154</lpage>. <pub-id pub-id-type="doi">10.3760/cma.j.cn115989-20240104-00005</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Jian</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Y.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>Advances and prospects of multi-modal ophthalmic artificial intelligence based on deep learning: a review</article-title>. <source>Eye Vis.</source> <volume>11</volume> (<issue>1</issue>), <fpage>38</fpage>. <pub-id pub-id-type="doi">10.1186/s40662-024-00405-1</pub-id>
<pub-id pub-id-type="pmid">39350240</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Cai</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Xie</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Lei</surname>
<given-names>B.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Predicting optical coherence tomography-derived high myopia grades from fundus photographs using deep learning</article-title>. <source>Front. Med.</source> <volume>9</volume>, <fpage>842680</fpage>. <pub-id pub-id-type="doi">10.3389/fmed.2022.842680</pub-id>
<pub-id pub-id-type="pmid">35308524</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Fang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>Y.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>Multi-rater prism: learning self-calibrated medical image segmentation from multiple raters</article-title>. <source>Sci. Bull.</source> <volume>69</volume> (<issue>18</issue>), <fpage>2906</fpage>&#x2013;<lpage>2919</lpage>. <pub-id pub-id-type="doi">10.1016/j.scib.2024.06.037</pub-id>
<pub-id pub-id-type="pmid">39155196</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Hong</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Ji</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Fu</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>Y.</given-names>
</name>
<etal/>
</person-group> (<year>2025</year>). <article-title>Medical SAM adapter: adapting segment anything model for medical image segmentation</article-title>. <source>Med. Image Anal.</source> <volume>102</volume>, <fpage>103547</fpage>. <pub-id pub-id-type="doi">10.1016/j.media.2025.103547</pub-id>
<pub-id pub-id-type="pmid">40121809</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Artificial intelligence applications in chronic ocular diseases</article-title>. <source>Front. Cell Dev. Biol.</source> <volume>11</volume>, <fpage>1295850</fpage>. <pub-id pub-id-type="doi">10.3389/fcell.2023.1295850</pub-id>
<pub-id pub-id-type="pmid">38143924</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>W. H.</given-names>
</name>
<name>
<surname>Fang</surname>
<given-names>H. H.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>S. J.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Artificial intelligence empowers ophthalmology clinic: making algorithms a medical assistant</article-title>. <source>Digital Med. Health</source> <volume>2</volume> (<issue>4</issue>), <fpage>209</fpage>&#x2013;<lpage>212</lpage>. <pub-id pub-id-type="doi">10.3760/cma.j.cn101909-20240516-00105</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhou</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>Y. J.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Research progress of deep learning in choroidal segmentation</article-title>. <source>Int. Eye Sci.</source> <volume>23</volume> (<issue>6</issue>), <fpage>1007</fpage>&#x2013;<lpage>1011</lpage>. <pub-id pub-id-type="doi">10.3980/j.issn.1672-5123.2023.6.25</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by">
<p>
<bold>Edited and reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/312230/overview">Ramani Ramchandran</ext-link>, Medical College of Wisconsin, United States</p>
</fn>
</fn-group>
</back>
</article>