<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Ophthalmol.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Ophthalmology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Ophthalmol.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2674-0826</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fopht.2026.1766974</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Artificial intelligence in ophthalmology: trust, bias, and responsibility from the perspective of medical students and ophthalmologists</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Yousef</surname><given-names>Yacoub A.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1241366/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Shdeifat</surname><given-names>Areen</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3368714/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Yousef</surname><given-names>Leen</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Mohammad</surname><given-names>Mona</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>AlNawaiseh</surname><given-names>Tamara</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Abdullah</surname><given-names>Leena</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Elfalah</surname><given-names>Mutasem</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1070231/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Sultan</surname><given-names>Iyad</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1230976/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>AlNawaiseh</surname><given-names>Ibrahim</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3377183/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Departments of Surgery (Ophthalmology), King Hussein Cancer Centre (KHCC)</institution>, <city>Amman</city>,&#xa0;<country country="JO">Jordan</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Special Surgery, Faculty of Medicine, The University of Jordan</institution>, <city>Amman</city>,&#xa0;<country country="JO">Jordan</country></aff>
<aff id="aff3"><label>3</label><institution>International General Certificate of Secondary Education (IGCSE), Islamic Educational College</institution>, <city>Amman</city>,&#xa0;<country country="JO">Jordan</country></aff>
<aff id="aff4"><label>4</label><institution>Department of Pediatric Oncology, King Hussein Cancer Centre (KHCC)</institution>, <city>Amman</city>,&#xa0;<country country="JO">Jordan</country></aff>
<author-notes>
<corresp id="c001"><label>*</label>Correspondence: Yacoub A. Yousef, <email xlink:href="mailto:yyousef@khcc.jo">yyousef@khcc.jo</email>; Ibrahim AlNawaiseh, <email xlink:href="mailto:i-nawaiseh@hotmail.com">i-nawaiseh@hotmail.com</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-03-03">
<day>03</day>
<month>03</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>6</volume>
<elocation-id>1766974</elocation-id>
<history>
<date date-type="received">
<day>13</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>15</day>
<month>02</month>
<year>2026</year>
</date>
<date date-type="rev-recd">
<day>05</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Yousef, Shdeifat, Yousef, Mohammad, AlNawaiseh, Abdullah, Elfalah, Sultan and AlNawaiseh.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Yousef, Shdeifat, Yousef, Mohammad, AlNawaiseh, Abdullah, Elfalah, Sultan and AlNawaiseh</copyright-holder>
<license>
<ali:license_ref start_date="2026-03-03">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Background</title>
<p>Artificial intelligence (AI) is increasingly integrated into ophthalmology, offering advances in diagnostic accuracy and surgical decision support. However, perceptions, trust, and ethical concerns regarding AI among medical students and ophthalmologists remain insufficiently explored.</p>
</sec>
<sec>
<title>Methods</title>
<p>A cross-sectional survey was conducted among 525 participants, including 353 medical students and 172 ophthalmologists. The questionnaire assessed perceptions of diagnostic reliability, AI-assisted surgical outcomes, responsibility attribution, ethical concerns, and trust in AI compared with clinician judgment.</p>
</sec>
<sec>
<title>Results</title>
<p>Most participants in both groups perceived human clinical expertise as more reliable for diagnosis than AI-driven systems (medical students 80%; ophthalmologists 72%; p = 0.054). In contrast, more than half of respondents believed AI-assisted surgery could achieve superior outcomes compared with manual techniques (medical students 55%; ophthalmologists 56%). Primary responsibility for AI-related clinical outcomes was most commonly attributed to physicians rather than AI developers (medical students 62%; ophthalmologists 66%; p = 0.666), and bias was identified as the leading ethical concern (70% of medical students and 75% of ophthalmologists). Approximately 70% of participants viewed AI as a complementary tool rather than a replacement for ophthalmologists, although nearly half anticipated AI might replace some optometric functions. In human&#x2013;AI disagreement scenarios, trust was context-dependent: 77&#x2013;79% deferred to AI when it contraindicated surgery recommended by clinicians, whereas 91% favored clinician judgment when AI recommended surgery against clinical advice. Early-career ophthalmologists demonstrated greater support for AI-assisted surgery compared with senior colleagues (p = 0.013).</p>
</sec>
<sec>
<title>Conclusion</title>
<p>Both medical students and ophthalmologists recognize AI&#x2019;s potential in ophthalmology, particularly for surgical applications, while continuing to prioritize human expertise for diagnosis. AI is largely viewed as a complementary tool, with ethical concerns surrounding bias and responsibility remaining prominent. Trust in AI varies by clinical context, and acceptance of AI-assisted surgery is greater among early-career ophthalmologists.</p>
</sec>
</abstract>
<kwd-group>
<kwd>artificial intelligence</kwd>
<kwd>clinical decision-making</kwd>
<kwd>medical education</kwd>
<kwd>ophthalmology</kwd>
<kwd>trust in AI</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported in part by King Hussein Cancer Center, Amman, Jordan (25KHCC004).</funding-statement>
</funding-group>
<counts>
<fig-count count="0"/>
<table-count count="3"/>
<equation-count count="0"/>
<ref-count count="21"/>
<page-count count="7"/>
<word-count count="2942"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>New Technologies in Ophthalmology</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<title>Introduction</title>
<p>Artificial intelligence (AI) plays an important role in transforming healthcare by enhancing diagnostic accuracy and enabling personalized and precision-based decisions. Ophthalmology, for example, has multiple AI applications in disease diagnosis, guiding treatment plans, surgical advice, and imaging interpretations, especially in retinal diseases such as diabetic retinopathy and age-related macular degeneration, where fundus photos can be read by AI-driven systems (<xref ref-type="bibr" rid="B1">1</xref>). In addition to computational power, it supports clinicians&#x2019; decision-making, boosts efficiency, and may reduce human errors (<xref ref-type="bibr" rid="B2">2</xref>).</p>
<p>However, integration of AI into ophthalmic practice raises many concerns around trust, accountability, and the evolving clinician role. Many previous surveys and reviews showed that eye care professionals are optimistic about these great advancements, but they have significant concerns about AI&#x2019;s reliability, ethical implications, and risks of professional displacement (<xref ref-type="bibr" rid="B3">3</xref>&#x2013;<xref ref-type="bibr" rid="B7">7</xref>).</p>
<p>In this study, we analyzed the perceptions of both medical students and practicing ophthalmologists (the current and future ophthalmologists) toward AI use in ophthalmology that may help in shaping ethical frameworks and educational strategies about the implementation of AI as a potential part of the standard of care in the field of ophthalmology.</p>
</sec>
<sec id="s2">
<title>Methods</title>
<p>The institutional review board (IRB) committee at King Hussein Cancer Center, Amman, Jordan, approved this cross-sectional study (25KHCC004).</p>
<sec id="s2_1">
<title>Study design and participants</title>
<p>We performed a cross-sectional survey of 525 participants: 353 medical students and 172 ophthalmologists. Participants were recruited using a convenience sampling approach through institutional mailing lists and professional networks affiliated with academic medical centers such as the University of Jordan and the Jordanian Ophthalmic Society, yielding 525 completed responses. Eligible participants included medical students, ophthalmology residents, and practicing ophthalmologists. Participation was voluntary and anonymous. Medical students enrolled in accredited medical schools and ophthalmologists at any stage of training or practice were eligible to participate. Incomplete survey responses were excluded from the final analysis. The ophthalmologist group was stratified based on years of clinical experience into three groups: Residents in training, practicing ophthalmologists with &lt;10 years of experience, and practicing ophthalmologists with &gt;10 years of experience. As participation was voluntary and based on convenience sampling, selection bias cannot be excluded, and the study sample may overrepresent individuals with greater interest or familiarity with artificial intelligence.</p>
</sec>
<sec id="s2_2">
<title>Survey instrument and validation</title>
<p>The questionnaire was adapted from previously published and validated instruments assessing perceptions of artificial intelligence among eye care professionals and healthcare providers (<xref ref-type="bibr" rid="B3">3</xref>). The original tools were modified to ensure relevance to ophthalmology practice by tailoring terminology, selecting context-specific items, and incorporating human&#x2013;AI disagreement scenarios relevant to diagnostic and surgical decision-making. Demographic data included gender, training level, and years of experience. Internal consistency reliability of the scale was evaluated using Cronbach&#x2019;s alpha (&#x3b1;) and Guttman&#x2019;s lambda-2 (&#x3bb;<sub>2</sub>). Cronbach&#x2019;s alpha for the scale was 0.64, indicating questionable internal consistency under the assumption of tau-equivalence. Because this assumption does not hold for the present data, Guttman&#x2019;s lambda-2, which is less sensitive to unequal item loadings, was also computed. The lambda-2 coefficient was 0.72, suggesting acceptable reliability. Item-rest correlations showed moderate associations across items, supporting internal coherence of the instrument.</p>
</sec>
<sec id="s2_3">
<title>Survey-assessed parameters</title>
<list list-type="order">
<list-item>
<p>Diagnostic reliability: preference of human expertise vs. AI diagnosis capability.</p></list-item>
<list-item>
<p>Surgical outcomes: perceptions of AI-assisted vs. traditional surgery.</p></list-item>
<list-item>
<p>Responsibility attribution: physicians vs. AI developers for AI-driven outcomes.</p></list-item>
<list-item>
<p>Challenges: concerns about bias, ethics, and healthcare disparities.</p></list-item>
<list-item>
<p>Trust in AI vs. clinicians: responses to hypothetical clinical-AI conflicts.</p></list-item>
</list>
</sec>
<sec id="s2_4">
<title>Data collection and analysis</title>
<p>This study was exploratory in nature. Given the multiple comparisons performed, no formal adjustment for multiple testing was applied. The analyses were intended to be descriptive and hypothesis-generating rather than confirmatory, and the potential risk of false-positive results is acknowledged. Descriptive statistics and chi-square tests compared categorical data. P value less than 0.05 was considered significant.</p>
</sec>
</sec>
<sec id="s3" sec-type="results">
<title>Results</title>
<sec id="s3_1">
<title>Participant demographics</title>
<p>Among 525 respondents, 353 (67%) were medical students and 172 (33%) were ophthalmologists. Ophthalmologists included residents (35%), those with &lt;10 years&#x2019; practice (38%), and those with &gt;10 years (26%). Most participants were female (67%) (<xref ref-type="table" rid="T1"><bold>Table&#xa0;1</bold></xref>).</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Participants&#x2019; Demographics.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Variable</th>
<th valign="middle" align="center">Total</th>
<th valign="middle" align="center">Medical Students</th>
<th valign="middle" align="center">Ophthalmologists</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Total number</td>
<td valign="middle" align="center">525</td>
<td valign="middle" align="center">353 (67%)</td>
<td valign="middle" align="center">172 (33%)</td>
</tr>
<tr>
<th valign="middle" colspan="4" align="left">Gender</th>
</tr>
<tr>
<td valign="middle" align="left">Female</td>
<td valign="middle" align="center">352 (67%)</td>
<td valign="middle" align="center">272 (77%)</td>
<td valign="middle" align="center">80 (47%)</td>
</tr>
<tr>
<td valign="middle" align="left">Male</td>
<td valign="middle" align="center">173 (33%)</td>
<td valign="middle" align="center">81 (23%)</td>
<td valign="middle" align="center">92 (53%)</td>
</tr>
<tr>
<th valign="middle" colspan="4" align="left">Age</th>
</tr>
<tr>
<td valign="middle" align="left">Under 48</td>
<td valign="middle" align="center">393 (75%)</td>
<td valign="middle" align="center">353 (100%)</td>
<td valign="middle" align="center">40 (23%)</td>
</tr>
<tr>
<td valign="middle" align="left">48&#x2013;80 Years</td>
<td valign="middle" align="center">97 (18%)</td>
<td valign="middle" align="center">0</td>
<td valign="middle" align="center">97 (57%)</td>
</tr>
<tr>
<td valign="middle" align="left">More than 80</td>
<td valign="middle" align="center">35 (7%)</td>
<td valign="middle" align="center">0</td>
<td valign="middle" align="center">35 (20%)</td>
</tr>
<tr>
<td valign="middle" align="left">Experience in AI</td>
<td valign="middle" align="center">225 (43%)</td>
<td valign="middle" align="center">166 (47%)</td>
<td valign="middle" align="center">59 (34%)</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3_2">
<title>Perceptions of AI integration</title>
<p>Most participants favored human expertise for diagnosis, while over half viewed AI-assisted surgery as potentially beneficial. Physicians were seen as primarily responsible for AI outcomes, and algorithmic bias was cited more than healthcare disparities as the main ethical concern. No significant differences were observed between students and ophthalmologists (<xref ref-type="table" rid="T2"><bold>Table&#xa0;2</bold></xref>).</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Perceptions of medical students and ophthalmologists regarding artificial intelligence in ophthalmology practice.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Question</th>
<th valign="middle" align="left">Choices</th>
<th valign="middle" colspan="2" align="left">Medical students (353) (N, %)</th>
<th valign="middle" colspan="2" align="left">Ophthalmologists (172) (N, %)</th>
<th valign="middle" align="left">P value</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" rowspan="2" align="left">Reliability<break/></td>
<td valign="middle" align="left">AI-driven systems</td>
<td valign="middle" align="left">72</td>
<td valign="middle" align="left">20%</td>
<td valign="middle" align="left">48</td>
<td valign="middle" align="left">28%</td>
<td valign="middle" align="left">0.054</td>
</tr>
<tr>
<td valign="middle" align="left">Human expertise</td>
<td valign="middle" align="left">281</td>
<td valign="middle" align="left">80%</td>
<td valign="middle" align="left">124</td>
<td valign="middle" align="left">72%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Outcomes<break/></td>
<td valign="middle" align="left">AI-assisted surgery</td>
<td valign="middle" align="left">193</td>
<td valign="middle" align="left">55%</td>
<td valign="middle" align="left">96</td>
<td valign="middle" align="left">56%</td>
<td valign="middle" align="left">0.851</td>
</tr>
<tr>
<td valign="middle" align="left">Manual surgery</td>
<td valign="middle" align="left">160</td>
<td valign="middle" align="left">45%</td>
<td valign="middle" align="left">76</td>
<td valign="middle" align="left">44%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Responsibility<break/></td>
<td valign="middle" align="left">Physician</td>
<td valign="middle" align="left">219</td>
<td valign="middle" align="left">62%</td>
<td valign="middle" align="left">113</td>
<td valign="middle" align="left">66%</td>
<td valign="middle" align="left">0.666</td>
</tr>
<tr>
<td valign="middle" align="left">Developers</td>
<td valign="middle" align="left">134</td>
<td valign="middle" align="left">38%</td>
<td valign="middle" align="left">59</td>
<td valign="middle" align="left">33%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Challenges<break/></td>
<td valign="middle" align="left">Healthcare disparities</td>
<td valign="middle" align="left">107</td>
<td valign="middle" align="left">30%</td>
<td valign="middle" align="left">43</td>
<td valign="middle" align="left">25%</td>
<td valign="middle" align="left">0.206</td>
</tr>
<tr>
<td valign="middle" align="left">Bias and ethics</td>
<td valign="middle" align="left">246</td>
<td valign="middle" align="left">70%</td>
<td valign="middle" align="left">129</td>
<td valign="middle" align="left">75%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Opportunities (Ophthalmologists)<break/></td>
<td valign="middle" align="left">Complement and create</td>
<td valign="middle" align="left">248</td>
<td valign="middle" align="left">70%</td>
<td valign="middle" align="left">120</td>
<td valign="middle" align="left">70%</td>
<td valign="middle" align="left">0.908</td>
</tr>
<tr>
<td valign="middle" align="left">Replace roles</td>
<td valign="middle" align="left">105</td>
<td valign="middle" align="left">30%</td>
<td valign="middle" align="left">52</td>
<td valign="middle" align="left">30%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Opportunities (Optometry)<break/></td>
<td valign="middle" align="left">Complement and create</td>
<td valign="middle" align="left">179</td>
<td valign="middle" align="left">51%</td>
<td valign="middle" align="left">80</td>
<td valign="middle" align="left">47%</td>
<td valign="middle" align="left">0.366</td>
</tr>
<tr>
<td valign="middle" align="left">Replace roles</td>
<td valign="middle" align="left">174</td>
<td valign="middle" align="left">49%</td>
<td valign="middle" align="left">92</td>
<td valign="middle" align="left">53%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Skills<break/></td>
<td valign="middle" align="left">Enhance skills</td>
<td valign="middle" align="left">232</td>
<td valign="middle" align="left">66%</td>
<td valign="middle" align="left">110</td>
<td valign="middle" align="left">64%</td>
<td valign="middle" align="left">0.689</td>
</tr>
<tr>
<td valign="middle" align="left">Reduce skills</td>
<td valign="middle" align="left">121</td>
<td valign="middle" align="left">34%</td>
<td valign="middle" align="left">62</td>
<td valign="middle" align="left">36%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Relationship<break/></td>
<td valign="middle" align="left">Strengthen relationship</td>
<td valign="middle" align="left">156</td>
<td valign="middle" align="left">44%</td>
<td valign="middle" align="left">76</td>
<td valign="middle" align="left">44%</td>
<td valign="middle" align="left">0.998</td>
</tr>
<tr>
<td valign="middle" align="left">Weaken relationship</td>
<td valign="middle" align="left">197</td>
<td valign="middle" align="left">56%</td>
<td valign="middle" align="left">96</td>
<td valign="middle" align="left">56%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Expected outcome<break/></td>
<td valign="middle" align="left">AI recommendations</td>
<td valign="middle" align="left">36</td>
<td valign="middle" align="left">10%</td>
<td valign="middle" align="left">11</td>
<td valign="middle" align="left">6%</td>
<td valign="middle" align="left">0.152</td>
</tr>
<tr>
<td valign="middle" align="left">Human expertise</td>
<td valign="middle" align="left">317</td>
<td valign="middle" align="left">90%</td>
<td valign="middle" align="left">161</td>
<td valign="middle" align="left">94%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Training<break/></td>
<td valign="middle" align="left">Collaborate with AI</td>
<td valign="middle" align="left">277</td>
<td valign="middle" align="left">78%</td>
<td valign="middle" align="left">140</td>
<td valign="middle" align="left">81%</td>
<td valign="middle" align="left">0.436</td>
</tr>
<tr>
<td valign="middle" align="left">Traditional techniques</td>
<td valign="middle" align="left">76</td>
<td valign="middle" align="left">22%</td>
<td valign="middle" align="left">32</td>
<td valign="middle" align="left">19%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="3" align="left">Leadership<break/><break/></td>
<td valign="middle" align="left">Lead in diagnosis</td>
<td valign="middle" align="left">116</td>
<td valign="middle" align="left">33%</td>
<td valign="middle" align="left">57</td>
<td valign="middle" align="left">33%</td>
<td valign="middle" align="left">0.84</td>
</tr>
<tr>
<td valign="middle" align="left">Lead in all</td>
<td valign="middle" align="left">156</td>
<td valign="middle" align="left">44%</td>
<td valign="middle" align="left">80</td>
<td valign="middle" align="left">47%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" align="left">Not lead</td>
<td valign="middle" align="left">81</td>
<td valign="middle" align="left">23%</td>
<td valign="middle" align="left">35</td>
<td valign="middle" align="left">20%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Choice<break/></td>
<td valign="middle" align="left">Rely on AI</td>
<td valign="middle" align="left">62</td>
<td valign="middle" align="left">18%</td>
<td valign="middle" align="left">11</td>
<td valign="middle" align="left">6%</td>
<td valign="middle" align="left">0.0005</td>
</tr>
<tr>
<td valign="middle" align="left">Supportive tool</td>
<td valign="middle" align="left">291</td>
<td valign="middle" align="left">82%</td>
<td valign="middle" align="left">161</td>
<td valign="middle" align="left">94%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="3" align="left">Role<break/><break/></td>
<td valign="middle" align="left">Disruptive tool</td>
<td valign="middle" align="left">17</td>
<td valign="middle" align="left">5%</td>
<td valign="middle" align="left">11</td>
<td valign="middle" align="left">6%</td>
<td valign="middle" align="left">0.354</td>
</tr>
<tr>
<td valign="middle" align="left">Supportive tool</td>
<td valign="middle" align="left">240</td>
<td valign="middle" align="left">70%</td>
<td valign="middle" align="left">107</td>
<td valign="middle" align="left">62%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" align="left">Transformative force</td>
<td valign="middle" align="left">96</td>
<td valign="middle" align="left">27%</td>
<td valign="middle" align="left">54</td>
<td valign="middle" align="left">32%</td>
<td valign="middle" align="left"/>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3_3">
<title>Trust in clinical decision-making</title>
<p>In paradoxical scenarios, 90% of students and 94% of ophthalmologists trusted human judgment over AI. Over two-thirds supported AI collaboration in education. Ophthalmologists were more likely than students to see AI as supportive rather than replacing human decisions (94% vs. 82%; <xref ref-type="table" rid="T2"><bold>Table&#xa0;2</bold></xref>).</p>
</sec>
<sec id="s3_4">
<title>Analysis of clinical&#x2013;AI conflicts</title>
<p>Participants showed asymmetric trust: 77&#x2013;79% followed AI when it contraindicated surgery, whereas 91% favored clinicians when AI recommended surgery against clinical advice.</p>
</sec>
<sec id="s3_5">
<title>Experience-related trends</title>
<p>Ophthalmologists with &lt;10 years&#x2019; experience or in training were more supportive of AI-assisted surgery than those with &gt;10 years (p = 0.013). No other experience-related differences were observed (<xref ref-type="table" rid="T3"><bold>Table&#xa0;3</bold></xref>).</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Subgroup analysis of ophthalmologists&#x2019; attitudes toward AI based on experience level.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Question</th>
<th valign="middle" align="center">Choices</th>
<th valign="middle" colspan="2" align="center">Ophthalmologists &gt;10 years (n = 45)</th>
<th valign="middle" colspan="2" align="center">Ophthalmologists &lt;10 years (n = 66)</th>
<th valign="middle" colspan="2" align="center">Residents (n = 61)</th>
<th valign="middle" align="center">P value</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" rowspan="2" align="left">Reliability<break/></td>
<td valign="middle" align="left">AI-driven systems</td>
<td valign="middle" align="left">17</td>
<td valign="middle" align="left">38%</td>
<td valign="middle" align="left">17</td>
<td valign="middle" align="left">26%</td>
<td valign="middle" align="left">12</td>
<td valign="middle" align="left">20%</td>
<td valign="middle" align="left">0.051</td>
</tr>
<tr>
<td valign="middle" align="left">Human expertise</td>
<td valign="middle" align="left">28</td>
<td valign="middle" align="left">62%</td>
<td valign="middle" align="left">49</td>
<td valign="middle" align="left">74%</td>
<td valign="middle" align="left">49</td>
<td valign="middle" align="left">80%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Outcomes<break/></td>
<td valign="middle" align="left">AI-assisted surgery</td>
<td valign="middle" align="left">9</td>
<td valign="middle" align="left">20%</td>
<td valign="middle" align="left">31</td>
<td valign="middle" align="left">47%</td>
<td valign="middle" align="left">28</td>
<td valign="middle" align="left">46%</td>
<td valign="middle" align="left">0.013</td>
</tr>
<tr>
<td valign="middle" align="left">Manual surgery</td>
<td valign="middle" align="left">36</td>
<td valign="middle" align="left">80%</td>
<td valign="middle" align="left">35</td>
<td valign="middle" align="left">53%</td>
<td valign="middle" align="left">33</td>
<td valign="middle" align="left">54%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Responsibility<break/></td>
<td valign="middle" align="left">Physician</td>
<td valign="middle" align="left">32</td>
<td valign="middle" align="left">71%</td>
<td valign="middle" align="left">46</td>
<td valign="middle" align="left">70%</td>
<td valign="middle" align="left">39</td>
<td valign="middle" align="left">64%</td>
<td valign="middle" align="left">0.605</td>
</tr>
<tr>
<td valign="middle" align="left">Developers</td>
<td valign="middle" align="left">13</td>
<td valign="middle" align="left">29%</td>
<td valign="middle" align="left">20</td>
<td valign="middle" align="left">30%</td>
<td valign="middle" align="left">22</td>
<td valign="middle" align="left">36%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Challenges<break/></td>
<td valign="middle" align="left">Healthcare disparities</td>
<td valign="middle" align="left">15</td>
<td valign="middle" align="left">33%</td>
<td valign="middle" align="left">12</td>
<td valign="middle" align="left">18%</td>
<td valign="middle" align="left">16</td>
<td valign="middle" align="left">26%</td>
<td valign="middle" align="left">0.133</td>
</tr>
<tr>
<td valign="middle" align="left">Bias and ethics</td>
<td valign="middle" align="left">30</td>
<td valign="middle" align="left">67%</td>
<td valign="middle" align="left">54</td>
<td valign="middle" align="left">82%</td>
<td valign="middle" align="left">45</td>
<td valign="middle" align="left">74%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Opportunities (Ophthalmologists)<break/></td>
<td valign="middle" align="left">Complement and create</td>
<td valign="middle" align="left">36</td>
<td valign="middle" align="left">80%</td>
<td valign="middle" align="left">41</td>
<td valign="middle" align="left">62%</td>
<td valign="middle" align="left">44</td>
<td valign="middle" align="left">72%</td>
<td valign="middle" align="left">0.099</td>
</tr>
<tr>
<td valign="middle" align="left">Replace roles</td>
<td valign="middle" align="left">9</td>
<td valign="middle" align="left">20%</td>
<td valign="middle" align="left">25</td>
<td valign="middle" align="left">38%</td>
<td valign="middle" align="left">17</td>
<td valign="middle" align="left">28%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Opportunities (Optometry)<break/></td>
<td valign="middle" align="left">Complement and create</td>
<td valign="middle" align="left">24</td>
<td valign="middle" align="left">53%</td>
<td valign="middle" align="left">26</td>
<td valign="middle" align="left">39%</td>
<td valign="middle" align="left">28</td>
<td valign="middle" align="left">46%</td>
<td valign="middle" align="left">0.226</td>
</tr>
<tr>
<td valign="middle" align="left">Replace roles</td>
<td valign="middle" align="left">21</td>
<td valign="middle" align="left">47%</td>
<td valign="middle" align="left">40</td>
<td valign="middle" align="left">61%</td>
<td valign="middle" align="left">33</td>
<td valign="middle" align="left">54%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Skills<break/></td>
<td valign="middle" align="left">Enhance skills</td>
<td valign="middle" align="left">35</td>
<td valign="middle" align="left">78%</td>
<td valign="middle" align="left">43</td>
<td valign="middle" align="left">65%</td>
<td valign="middle" align="left">36</td>
<td valign="middle" align="left">59%</td>
<td valign="middle" align="left">0.057</td>
</tr>
<tr>
<td valign="middle" align="left">Reduce skills</td>
<td valign="middle" align="left">10</td>
<td valign="middle" align="left">22%</td>
<td valign="middle" align="left">23</td>
<td valign="middle" align="left">35%</td>
<td valign="middle" align="left">25</td>
<td valign="middle" align="left">41%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Relationship<break/></td>
<td valign="middle" align="left">Strengthen relationship</td>
<td valign="middle" align="left">21</td>
<td valign="middle" align="left">47%</td>
<td valign="middle" align="left">30</td>
<td valign="middle" align="left">45%</td>
<td valign="middle" align="left">25</td>
<td valign="middle" align="left">41%</td>
<td valign="middle" align="left">0.696</td>
</tr>
<tr>
<td valign="middle" align="left">Weaken relationship</td>
<td valign="middle" align="left">24</td>
<td valign="middle" align="left">53%</td>
<td valign="middle" align="left">36</td>
<td valign="middle" align="left">55%</td>
<td valign="middle" align="left">36</td>
<td valign="middle" align="left">59%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Expected outcome<break/></td>
<td valign="middle" align="left">Human expertise</td>
<td valign="middle" align="left">42</td>
<td valign="middle" align="left">93%</td>
<td valign="middle" align="left">62</td>
<td valign="middle" align="left">94%</td>
<td valign="middle" align="left">57</td>
<td valign="middle" align="left">93%</td>
<td valign="middle" align="left">0.931</td>
</tr>
<tr>
<td valign="middle" align="left">AI recommendations</td>
<td valign="middle" align="left">3</td>
<td valign="middle" align="left">7%</td>
<td valign="middle" align="left">4</td>
<td valign="middle" align="left">6%</td>
<td valign="middle" align="left">4</td>
<td valign="middle" align="left">7%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Training<break/></td>
<td valign="middle" align="left">Collaborate with AI</td>
<td valign="middle" align="left">39</td>
<td valign="middle" align="left">87%</td>
<td valign="middle" align="left">56</td>
<td valign="middle" align="left">85%</td>
<td valign="middle" align="left">47</td>
<td valign="middle" align="left">77%</td>
<td valign="middle" align="left">0.498</td>
</tr>
<tr>
<td valign="middle" align="left">Traditional techniques</td>
<td valign="middle" align="left">6</td>
<td valign="middle" align="left">13%</td>
<td valign="middle" align="left">11</td>
<td valign="middle" align="left">15%</td>
<td valign="middle" align="left">14</td>
<td valign="middle" align="left">23%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="3" align="left">Leadership<break/><break/></td>
<td valign="middle" align="left">Lead in diagnosis</td>
<td valign="middle" align="left">14</td>
<td valign="middle" align="left">31%</td>
<td valign="middle" align="left">24</td>
<td valign="middle" align="left">36%</td>
<td valign="middle" align="left">12</td>
<td valign="middle" align="left">20%</td>
<td valign="middle" align="left">0.66</td>
</tr>
<tr>
<td valign="middle" align="left">Lead in all</td>
<td valign="middle" align="left">25</td>
<td valign="middle" align="left">56%</td>
<td valign="middle" align="left">32</td>
<td valign="middle" align="left">49%</td>
<td valign="middle" align="left">22</td>
<td valign="middle" align="left">36%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" align="left">Not lead</td>
<td valign="middle" align="left">6</td>
<td valign="middle" align="left">13%</td>
<td valign="middle" align="left">10</td>
<td valign="middle" align="left">15%</td>
<td valign="middle" align="left">17</td>
<td valign="middle" align="left">28%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="2" align="left">Choice<break/></td>
<td valign="middle" align="left">Rely on AI</td>
<td valign="middle" align="left">3</td>
<td valign="middle" align="left">7%</td>
<td valign="middle" align="left">6</td>
<td valign="middle" align="left">9%</td>
<td valign="middle" align="left">1</td>
<td valign="middle" align="left">2%</td>
<td valign="middle" align="left">0.776</td>
</tr>
<tr>
<td valign="middle" align="left">Supportive tool</td>
<td valign="middle" align="left">42</td>
<td valign="middle" align="left">93%</td>
<td valign="middle" align="left">60</td>
<td valign="middle" align="left">91%</td>
<td valign="middle" align="left">60</td>
<td valign="middle" align="left">98%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" rowspan="3" align="left">Role<break/><break/></td>
<td valign="middle" align="left">Transformative tool</td>
<td valign="middle" align="left">14</td>
<td valign="middle" align="left">31%</td>
<td valign="middle" align="left">22</td>
<td valign="middle" align="left">33%</td>
<td valign="middle" align="left">17</td>
<td valign="middle" align="left">28%</td>
<td valign="middle" align="left">0.921</td>
</tr>
<tr>
<td valign="middle" align="left">Supportive tool</td>
<td valign="middle" align="left">28</td>
<td valign="middle" align="left">62%</td>
<td valign="middle" align="left">38</td>
<td valign="middle" align="left">58%</td>
<td valign="middle" align="left">43</td>
<td valign="middle" align="left">71%</td>
<td valign="middle" align="left"/>
</tr>
<tr>
<td valign="middle" align="left">Disruptive force</td>
<td valign="middle" align="left">3</td>
<td valign="middle" align="left">7%</td>
<td valign="middle" align="left">6</td>
<td valign="middle" align="left">9%</td>
<td valign="middle" align="left">1</td>
<td valign="middle" align="left">2%</td>
<td valign="middle" align="left"/>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<title>Discussion</title>
<p>The results of our study provide a comprehensive analysis of the perceptions of both medical students and ophthalmologists in a homogeneous community towards the use of artificial intelligence (AI) in ophthalmology practice. Overall, our findings showed that AI is preferred as a complementary supportive tool that can help in diagnosis and recommendations for treatment, rather than a replacement for human expertise. This is expected because of the known promising AI capabilities and potential, but with substantial limitations and ethical challenges (<xref ref-type="bibr" rid="B8">8</xref>&#x2013;<xref ref-type="bibr" rid="B10">10</xref>). However, it is important to emphasize that our findings reflect perceptions and attitudes derived from hypothetical scenarios rather than observed clinical behavior.</p>
<p>Diagnostic reliability remains a major concern for practicing physicians when it comes to the integration of AI into clinical practice. Eighty percent of medical students and 72% of ophthalmologists still favor human clinical judgment over AI in diagnostic decision-making, which is consistent with prior literature emphasizing the importance of contextual and experiential knowledge that AI systems often lack (<xref ref-type="bibr" rid="B11">11</xref>). The diagnostic process in ophthalmology involves making correlations between clinical findings, patient history, and disease manifestations that current AI models may not fully capture, especially given their training on limited or homogeneous datasets. This underscores the necessity for clinicians to use AI cautiously, rather than deferring blindly. However, the perception that AI-assisted surgical techniques and recommendations could yield a better outcome was endorsed by more than half of participants in this study (55% of medical students and 56% of ophthalmologists), highlighting a growing confidence in AI&#x2019;s ability to enhance procedural precision and consistency (<xref ref-type="bibr" rid="B12">12</xref>). Surgical applications of AI, such as robotic-assisted microsurgery and intraoperative guidance, have demonstrated promise in improving outcomes by minimizing human error, reducing fatigue-related variability, and enabling data-driven customization of surgical plans. This growing confidence in AI capabilities and in machine learning in general comes from the understanding that the AI mechanism of work depends on repetitive, high-precision tasks, and surgeons believe that these capabilities can help and enhance a surgeon&#x2019;s performance and surgical outcomes. This perspective is consistent with findings from other fields, including neurosurgery and orthopedics (<xref ref-type="bibr" rid="B11">11</xref>, <xref ref-type="bibr" rid="B13">13</xref>).</p>
<p>Surprisingly, we found that trust in AI varied according to the clinical scenario when there was a paradox between human clinical judgment and AI recommendations. When AI contradicted a clinician&#x2019;s positive assessment of the expected outcome for refractive surgery, the majority (77%) deferred to AI recommendations, suggesting that participants regard AI as an important safeguard against overtreatment and potential procedural risks. This tendency was even stronger among medical students (79%) compared to ophthalmologists (72%), which indicates that less clinical experience leads to greater reliance on algorithmic caution than on human judgment. In contrast, when AI claimed that refractive surgery was safe and recommended in a situation where clinicians advised against this procedure, 91% of participants sided with the clinicians, reflecting enduring confidence in human expertise to make high-stakes, risk-averse decisions.</p>
<p>This asymmetry in trust distribution highlights a key psychological behavior: AI is predominantly trusted as a risk mitigation tool, especially in identifying when intervention may be unwarranted or harmful, but human clinical judgment retains primacy in approving high-risk procedures (<xref ref-type="bibr" rid="B13">13</xref>, <xref ref-type="bibr" rid="B14">14</xref>). Such dynamics have been reported in radiology and oncology, where AI is welcomed for flagging suspicious findings or predicting adverse events but is less trusted to overrule experienced clinicians&#x2019; decisions to withhold treatment (<xref ref-type="bibr" rid="B11">11</xref>, <xref ref-type="bibr" rid="B13">13</xref>). This phenomenon also highlights the importance of understanding AI not as an autonomous decision-maker but as a collaborator that enhances clinical safety. These interpretations should be considered cautiously, as psychological explanations for trust dynamics were not directly measured and remain speculative.</p>
<p>Ethical and practical concerns about AI&#x2019;s implementation were prominent. The leading ethical concern in this study was focused on bias and ethics rather than healthcare disparities, as advocated by 69% of medical students and 76% of ophthalmologists. These concerns are well-founded, given that AI algorithms often derive from datasets that lack diversity, leading to potential misdiagnoses or inequitable care in underrepresented populations (<xref ref-type="bibr" rid="B15">15</xref>). The risk of systemic biases through AI could exacerbate existing health disparities unless human efforts are made to validate the algorithms to ensure fairness and transparency. Additionally, concerns about accountability arise because clinicians find it hard to trust AI models whose decision pathways are not clearly understood by humans, raising challenges in clinical validation and medico-legal liability (<xref ref-type="bibr" rid="B16">16</xref>, <xref ref-type="bibr" rid="B17">17</xref>).</p>
<p>The vast majority of participants still believe that the responsibility for AI-driven clinical outcomes should be assigned to physicians rather than AI developers, and this aligns with current ethical and regulatory paradigms that place ultimate accountability on human clinicians. This finding emphasizes the importance of continuous clinical validation of all AI tools and for clear regulatory guidelines that define the boundaries of AI&#x2019;s clinical role. Without such frameworks, there is a risk of legal ambiguity and erosion of trust among both practitioners and patients.</p>
<p>As expected, our data showed certain differences in perception toward AI within the ophthalmologist cohort. Those with fewer than 10 years of experience or who are ophthalmology residents in training were significantly more supportive of AI-assisted surgical interventions than ophthalmologists with more than 10 years of practicing experience. This trend likely reflects increased exposure to digital technologies during training and a greater openness to technological innovation in the younger group (<xref ref-type="bibr" rid="B18">18</xref>, <xref ref-type="bibr" rid="B19">19</xref>). It also suggests that early and sustained incorporation of AI literacy into medical education could foster more uniform acceptance and more effective human-AI collaboration in future clinical practice. More senior physicians still place greater trust in human skills. This explains why about one-third of participants are worried that increased reliance on AI could impair clinical judgment and worsen surgical skills among future physicians (<xref ref-type="bibr" rid="B10">10</xref>, <xref ref-type="bibr" rid="B19">19</xref>). The greater enthusiasm for AI-assisted surgery observed among residents and early-career ophthalmologists may reflect several factors beyond digital familiarity alone. These include differences in training environments, levels of surgical autonomy, perceived medico-legal responsibility, risk tolerance, and greater exposure to supervised or technology-augmented surgical workflows during training. In contrast, senior ophthalmologists may rely more heavily on experiential judgment developed over years of independent practice, contributing to more cautious adoption of AI-assisted interventions. Furthermore, more than half of the participants also believed that AI might negatively affect the doctor-patient relationship, possibly out of concern that it could make care feel less personal or reduce the sense of empathy. These findings underscore the importance of integrating AI in ways that uphold the human side of medicine, emphasizing communication, empathy, and ethical responsibility.</p>
<p>The support for incorporating AI training into ophthalmology curricula (69&#x2013;71%) is encouraging, as it reflects a new tendency in medical education towards creating structured, longitudinal education to improve algorithm literacy and to ensure clinicians develop the skills necessary to critically appraise and effectively use AI tools (<xref ref-type="bibr" rid="B15">15</xref>, <xref ref-type="bibr" rid="B19">19</xref>). Such educational initiatives will be crucial to avoid both over-reliance and undue skepticism and to cultivate the competence and confidence necessary for future ophthalmologists to obtain the maximum benefits of AI safely.</p>
<p>While our study provides valuable insights, it is important to consider its limitations. The use of hypothetical clinical scenarios, while methodologically necessary to control variables, may not capture the full complexity and time pressures inherent in real-world clinical decision-making. As respondents&#x2019; answers reflect idealized rather than practical behaviors, these findings should be viewed as a baseline for future qualitative study. Furthermore, the sample likely includes participants with more interest or familiarity with AI, potentially biasing results toward more favorable views. The study&#x2019;s regional focus may also limit generalizability, as attitudes toward AI can vary widely across healthcare systems, regulatory environments, and cultural and institutional contexts (<xref ref-type="bibr" rid="B20">20</xref>). Future research should explore these factors in a larger, more diverse population.</p>
<p>In summary, AI in ophthalmology is a promising, yet context-dependent, adjunct whose greatest value lies in complementing human clinical judgment rather than replacing it (<xref ref-type="bibr" rid="B21">21</xref>). The nuanced interplay of trust, risk perception, and ethical considerations revealed here should guide future AI deployment in clinical practice. We recommend integrating AI education throughout medical training, establishing clear ethical and regulatory frameworks, and designing specialty-specific models of human-AI collaboration that optimize patient outcomes without compromising the clinical role of physicians. This balanced approach will be essential as ophthalmology navigates the transformative potential of AI.</p>
</sec>
<sec id="s5" sec-type="conclusions">
<title>Conclusion</title>
<p>Artificial intelligence is a valuable tool in ophthalmology, with trust depending mainly on clinical context and perceived risk. AI is most trusted to avoid over-treatment, while clinician judgment remains decisive in high-stakes decisions. To maximize AI benefits, we need to integrate AI training into medical education, clarify accountability, and establish collaborative frameworks with ethical guidelines that regulate human-AI interaction. Combining clinical judgment and AI algorithms will be key to future ophthalmic practice.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="data-availability">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p></sec>
<sec id="s7" sec-type="ethics-statement">
<title>Ethics statement</title>
<p>The studies involving humans were approved by IRB at King Hussein Cancer Center. The studies were conducted in accordance with the local legislation and institutional requirements. The ethics committee/institutional review board waived the requirement of written informed consent for participation from the participants or the participants&#x2019; legal guardians/next of kin because participation of the survey means the participant is willing to do it.</p></sec>
<sec id="s8" sec-type="author-contributions">
<title>Author contributions</title>
<p>YY: Conceptualization, Data curation, Formal Analysis, Methodology, Validation, Writing &#x2013; original draft, Writing &#x2013; review &amp; editing. AS: Data curation, Formal Analysis, Software, Writing &#x2013; original draft. LY: Formal Analysis, Software, Writing &#x2013; original draft. MM: Conceptualization, Methodology, Validation, Writing &#x2013; original draft. TA: Data curation, Formal Analysis, Writing &#x2013; original draft. LA: Data curation, Formal Analysis, Writing &#x2013; original draft. ME: Methodology, Validation, Writing &#x2013; review &amp; editing. IS: Supervision, Validation, Writing &#x2013; review &amp; editing. IA: Conceptualization, Supervision, Validation, Writing &#x2013; review &amp; editing.</p></sec>
<sec id="s10" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p></sec>
<sec id="s11" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec id="s12" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p></sec>
<sec id="s13" sec-type="supplementary-material">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fopht.2026.1766974/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fopht.2026.1766974/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Supplementaryfile1.doc" id="SM1" mimetype="application/msword"><label>SUPPLEMENTARY FILE 1</label>
<caption>
<p>Summary of survey-assessed parameters and demographics.</p>
</caption></supplementary-material></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ting</surname> <given-names>DSW</given-names></name>
<name><surname>Pasquale</surname> <given-names>LR</given-names></name>
<name><surname>Peng</surname> <given-names>L</given-names></name>
<name><surname>Campbell</surname> <given-names>JP</given-names></name>
<name><surname>Lee</surname> <given-names>AY</given-names></name>
<name><surname>Raman</surname> <given-names>R</given-names></name>
<etal/>
</person-group>. 
<article-title>Artificial intelligence and deep learning in ophthalmology</article-title>. <source>Br J Ophthalmol</source>. (<year>2019</year>) <volume>103</volume>:<page-range>167&#x2013;75</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1136/bjophthalmol-2018-313173</pub-id>, PMID: <pub-id pub-id-type="pmid">30361278</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<label>2</label>
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Beede</surname> <given-names>E</given-names></name>
<name><surname>Baylor</surname> <given-names>E</given-names></name>
<name><surname>Hersch</surname> <given-names>F</given-names></name>
<name><surname>Iurchenko</surname> <given-names>A</given-names></name>
<name><surname>Wilcox</surname> <given-names>L</given-names></name>
<name><surname>Ruamviboonsuk</surname> <given-names>P</given-names></name>
<etal/>
</person-group>. &#x201c;
<article-title>A human-centered evaluation of a deep learning system deployed in clinics for the detection of diabetic retinopathy</article-title>,&#x201d; In: <conf-name>Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems</conf-name>, <publisher-loc>New York, NY, USA</publisher-loc>: 
<publisher-name>Association for Computing Machinery</publisher-name>. (<year>2020</year>). pp. <fpage>1</fpage>&#x2013;<lpage>12</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1145/3313831.3376718</pub-id>, PMID: <pub-id pub-id-type="pmid">40727313</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<label>3</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Tan</surname> <given-names>TF</given-names></name>
<name><surname>Thirunavukarasu</surname> <given-names>AJ</given-names></name>
<name><surname>Jin</surname> <given-names>L</given-names></name>
<name><surname>Lim</surname> <given-names>J</given-names></name>
<name><surname>Poh</surname> <given-names>S</given-names></name>
<name><surname>Teo</surname> <given-names>ZL</given-names></name>
<etal/>
</person-group>. 
<article-title>Artificial intelligence and digital health in global eye health: opportunities and challenges</article-title>. <source>Lancet Glob Health</source>. (<year>2023</year>) <volume>11</volume>:<page-range>e1432&#x2013;43</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/S2214-109X(23)00323-6</pub-id>, PMID: <pub-id pub-id-type="pmid">37591589</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<label>4</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Gunasekeran</surname> <given-names>DV</given-names></name>
<name><surname>Wong</surname> <given-names>TY</given-names></name>
</person-group>. 
<article-title>Artificial intelligence in ophthalmology in 2020: a technology on the cusp for translation</article-title>. <source>Asia Pac J Ophthalmol (Phila)</source>. (<year>2020</year>) <volume>9</volume>:<page-range>11&#x2013;9</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1097/APO.0000000000000292</pub-id>, PMID: <pub-id pub-id-type="pmid">32487917</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<label>5</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Idogen</surname> <given-names>OS</given-names></name>
</person-group>. 
<article-title>The perspectives of eye care professionals on the integration of artificial intelligence in eye care practices: a systematic review</article-title>. <source>Artif Intell Healthc</source>. (<year>2024</year>) <volume>1</volume>:<fpage>66</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.36922/aih.2809</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<label>6</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Bellemo</surname> <given-names>V</given-names></name>
<name><surname>Lim</surname> <given-names>G</given-names></name>
<name><surname>Rim</surname> <given-names>TH</given-names></name>
<name><surname>Tan</surname> <given-names>GSW</given-names></name>
<name><surname>Cheung</surname> <given-names>CY</given-names></name>
<name><surname>Sadda</surname> <given-names>S</given-names></name>
<etal/>
</person-group>. 
<article-title>Artificial intelligence screening for diabetic retinopathy: the real-world emerging application</article-title>. <source>Curr Diabetes Rep</source>. (<year>2019</year>) <volume>19</volume>:<fpage>72</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11892-019-1187-4</pub-id>, PMID: <pub-id pub-id-type="pmid">31367965</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<label>7</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Schmidt-Erfurth</surname> <given-names>U</given-names></name>
<name><surname>Sadeghipour</surname> <given-names>A</given-names></name>
<name><surname>Gerendas</surname> <given-names>BS</given-names></name>
<name><surname>Waldstein</surname> <given-names>SM</given-names></name>
<name><surname>Bogunovi&#x107;</surname> <given-names>H</given-names></name>
</person-group>. 
<article-title>Artificial intelligence in retina</article-title>. <source>Prog Retin Eye Res</source>. (<year>2018</year>) <volume>67</volume>:<fpage>1</fpage>&#x2013;<lpage>29</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.preteyeres.2018.07.004</pub-id>, PMID: <pub-id pub-id-type="pmid">30076935</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<label>8</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Abr&#xe0;moff</surname> <given-names>MD</given-names></name>
<name><surname>Lavin</surname> <given-names>PT</given-names></name>
<name><surname>Birch</surname> <given-names>M</given-names></name>
<name><surname>Shah</surname> <given-names>N</given-names></name>
<name><surname>Folk</surname> <given-names>JC</given-names></name>
</person-group>. 
<article-title>Pivotal trial of an autonomous AI-based diagnostic system for the detection of diabetic retinopathy in primary care offices</article-title>. <source>NPJ Digit Med</source>. (<year>2018</year>) <volume>1</volume>:<fpage>39</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41746-018-0040-6</pub-id>, PMID: <pub-id pub-id-type="pmid">31304320</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<label>9</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Pesapane</surname> <given-names>F</given-names></name>
<name><surname>Codari</surname> <given-names>M</given-names></name>
<name><surname>Sardanelli</surname> <given-names>F</given-names></name>
</person-group>. 
<article-title>Artificial intelligence in medical imaging: threat or opportunity</article-title>? <source>Eur Radiol Exp</source>. (<year>2018</year>) <volume>2</volume>:<fpage>35</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s41747-018-0061-6</pub-id>, PMID: <pub-id pub-id-type="pmid">30353365</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<label>10</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Esteva</surname> <given-names>A</given-names></name>
<name><surname>Robicquet</surname> <given-names>A</given-names></name>
<name><surname>Ramsundar</surname> <given-names>B</given-names></name>
<name><surname>Kuleshov</surname> <given-names>V</given-names></name>
<name><surname>DePristo</surname> <given-names>M</given-names></name>
<name><surname>Chou</surname> <given-names>K</given-names></name>
<etal/>
</person-group>. 
<article-title>A guide to deep learning in healthcare</article-title>. <source>Nat Med</source>. (<year>2019</year>) <volume>25</volume>:<page-range>24&#x2013;9</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41591-018-0316-z</pub-id>, PMID: <pub-id pub-id-type="pmid">30617335</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<label>11</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Rajpurkar</surname> <given-names>P</given-names></name>
<name><surname>Irvin</surname> <given-names>J</given-names></name>
<name><surname>Ball</surname> <given-names>RL</given-names></name>
<name><surname>Zhu</surname> <given-names>K</given-names></name>
<name><surname>Yang</surname> <given-names>B</given-names></name>
<name><surname>Mehta</surname> <given-names>H</given-names></name>
<etal/>
</person-group>. 
<article-title>Deep learning for chest radiograph diagnosis: a retrospective comparison of the CheXNeXt algorithm to practicing radiologists</article-title>. <source>PloS Med</source>. (<year>2018</year>) <volume>15</volume>:<fpage>e1002686</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1371/journal.pmed.1002686</pub-id>, PMID: <pub-id pub-id-type="pmid">30457988</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<label>12</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Yu</surname> <given-names>KH</given-names></name>
<name><surname>Beam</surname> <given-names>AL</given-names></name>
<name><surname>Kohane</surname> <given-names>IS</given-names></name>
</person-group>. 
<article-title>Artificial intelligence in healthcare</article-title>. <source>Nat BioMed Eng</source>. (<year>2018</year>) <volume>2</volume>:<page-range>719&#x2013;31</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41551-018-0305-z</pub-id>, PMID: <pub-id pub-id-type="pmid">31015651</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<label>13</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Mehrabi</surname> <given-names>N</given-names></name>
<name><surname>Morstatter</surname> <given-names>F</given-names></name>
<name><surname>Saxena</surname> <given-names>N</given-names></name>
<name><surname>Lerman</surname> <given-names>K</given-names></name>
<name><surname>Galstyan</surname> <given-names>A</given-names></name>
</person-group>. 
<article-title>A survey on bias and fairness in machine learning</article-title>. <source>ACM Comput Surv</source>. (<year>2021</year>) <volume>54</volume>:<fpage>1</fpage>&#x2013;<lpage>35</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1145/3457607</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<label>14</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Topol</surname> <given-names>EJ</given-names></name>
</person-group>. 
<article-title>High-performance medicine: the convergence of human and artificial intelligence</article-title>. <source>Nat Med</source>. (<year>2019</year>) <volume>25</volume>:<fpage>44</fpage>&#x2013;<lpage>56</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41591-018-0300-7</pub-id>, PMID: <pub-id pub-id-type="pmid">30617339</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<label>15</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>London</surname> <given-names>AJ</given-names></name>
</person-group>. 
<article-title>Artificial intelligence and black-box medical decisions: accuracy versus explainability</article-title>. <source>Hastings Cent Rep</source>. (<year>2019</year>) <volume>49</volume>:<fpage>15</fpage>&#x2013;<lpage>21</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/hast.973</pub-id>, PMID: <pub-id pub-id-type="pmid">30790315</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<label>16</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Char</surname> <given-names>DS</given-names></name>
<name><surname>Shah</surname> <given-names>NH</given-names></name>
<name><surname>Magnus</surname> <given-names>D</given-names></name>
</person-group>. 
<article-title>Implementing machine learning in health care&#x2014;addressing ethical challenges</article-title>. <source>N Engl J Med</source>. (<year>2018</year>) <volume>378</volume>:<page-range>981&#x2013;3</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1056/NEJMp1714229</pub-id>, PMID: <pub-id pub-id-type="pmid">29539284</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<label>17</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Kolachalama</surname> <given-names>VB</given-names></name>
<name><surname>Garg</surname> <given-names>PS</given-names></name>
</person-group>. 
<article-title>Machine learning and medical education</article-title>. <source>NPJ Digit Med</source>. (<year>2018</year>) <volume>1</volume>:<fpage>54</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41746-018-0061-1</pub-id>, PMID: <pub-id pub-id-type="pmid">31304333</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<label>18</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wartman</surname> <given-names>SA</given-names></name>
<name><surname>Combs</surname> <given-names>CD</given-names></name>
</person-group>. 
<article-title>Medical education must move from the information age to the age of artificial intelligence</article-title>. <source>Acad Med</source>. (<year>2018</year>) <volume>93</volume>:<page-range>1107&#x2013;9</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1097/ACM.0000000000002044</pub-id>, PMID: <pub-id pub-id-type="pmid">29095704</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<label>19</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Blease</surname> <given-names>C</given-names></name>
<name><surname>Kharko</surname> <given-names>A</given-names></name>
<name><surname>Li</surname> <given-names>M</given-names></name>
<name><surname>Mittal</surname> <given-names>S</given-names></name>
<name><surname>Torous</surname> <given-names>J</given-names></name>
<name><surname>Kaptchuk</surname> <given-names>TJ</given-names></name>
</person-group>. 
<article-title>Artificial intelligence and the future of psychiatry: insights from a global physician survey</article-title>. <source>NPJ Digit Med</source>. (<year>2023</year>) <volume>6</volume>:<fpage>112</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41746-023-00846-8</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<label>20</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Kermany</surname> <given-names>DS</given-names></name>
<name><surname>Goldbaum</surname> <given-names>M</given-names></name>
<name><surname>Cai</surname> <given-names>W</given-names></name>
<name><surname>Valentim</surname> <given-names>CCS</given-names></name>
<name><surname>Liang</surname> <given-names>H</given-names></name>
<name><surname>Baxter</surname> <given-names>SL</given-names></name>
<etal/>
</person-group>. 
<article-title>Identifying medical diagnoses and treatable diseases by image-based deep learning</article-title>. <source>Cell</source>. (<year>2018</year>) <volume>172</volume>:<fpage>1122</fpage>&#x2013;<lpage>1131.e9</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.cell.2018.02.010</pub-id>, PMID: <pub-id pub-id-type="pmid">29474911</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<label>21</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ting</surname> <given-names>DSW</given-names></name>
<name><surname>Liu</surname> <given-names>Y</given-names></name>
<name><surname>Burlina</surname> <given-names>P</given-names></name>
<name><surname>Xu</surname> <given-names>X</given-names></name>
<name><surname>Bressler</surname> <given-names>NM</given-names></name>
<name><surname>Wong</surname> <given-names>TY</given-names></name>
</person-group>. 
<article-title>AI for medical imaging goes deep</article-title>. <source>Nat Med</source>. (<year>2018</year>) <volume>24</volume>:<page-range>539&#x2013;40</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41591-018-0016-5</pub-id>, PMID: <pub-id pub-id-type="pmid">29736024</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2333729">Meng Wang</ext-link>, Technology and Research (ASTAR), Singapore</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2777424">Sheila John</ext-link>, Sankara Nethralaya, India</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3151868">Uyen Nguyen</ext-link>, Dong Thap Medical College, Vietnam</p></fn>
</fn-group>
</back>
</article>