<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article article-type="review-article" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Digit. Health</journal-id><journal-title-group>
<journal-title>Frontiers in Digital Health</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Digit. Health</abbrev-journal-title></journal-title-group>
<issn pub-type="epub">2673-253X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fdgth.2025.1740557</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Systematic Review</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Artificial intelligence, extended reality, and emerging AI&#x2013;XR integrations in medical education</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes"><name><surname>Tene</surname><given-names>Talia</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="cor1">&#x002A;</xref><uri xlink:href="https://loop.frontiersin.org/people/2377430/overview"/><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" 
vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
<contrib contrib-type="author"><name><surname>Vique L&#x00F3;pez</surname><given-names>Diego Fabi&#x00E1;n</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/2382064/overview" /><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
<contrib contrib-type="author"><name><surname>Garc&#x00ED;a Veloz</surname><given-names>Marlene Jacqueline</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
<contrib contrib-type="author"><name><surname>Rojas Oviedo</surname><given-names>Byron Stalin</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
<contrib contrib-type="author"><name><surname>Tene-Fernandez</surname><given-names>Richard</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Department of Chemistry, Universidad T&#x00E9;cnica Particular de Loja</institution>, <city>Loja</city>, <country country="EC">Ecuador</country></aff>
<aff id="aff2"><label>2</label><institution>Facultad de Salud P&#x00FA;blica, Escuela Superior Polit&#x00E9;cnica de Chimborazo (ESPOCH)</institution>, <city>Riobamba</city>, <country country="EC">Ecuador</country></aff>
<aff id="aff3"><label>3</label><institution>Facultad de Ciencias, Escuela Superior Polit&#x00E9;cnica de Chimborazo (ESPOCH)</institution>, <city>Riobamba</city>, <country country="EC">Ecuador</country></aff>
<aff id="aff4"><label>4</label><institution>Hospital Metropolitano de Quito</institution>, <city>Quito</city>, <country country="EC">Ecuador</country></aff>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label><bold>Correspondence:</bold> Talia Tene <email xlink:href="mailto:tbtene@utpl.edu.ec">tbtene@utpl.edu.ec</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-01-09"><day>09</day><month>01</month><year>2026</year></pub-date>
<pub-date publication-format="electronic" date-type="collection"><year>2025</year></pub-date>
<volume>7</volume><elocation-id>1740557</elocation-id>
<history>
<date date-type="received"><day>06</day><month>11</month><year>2025</year></date>
<date date-type="rev-recd"><day>09</day><month>12</month><year>2025</year></date>
<date date-type="accepted"><day>12</day><month>12</month><year>2025</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2026 Tene, Vique L&#x00F3;pez, Garc&#x00ED;a Veloz, Rojas Oviedo and Tene-Fernandez.</copyright-statement>
<copyright-year>2026</copyright-year><copyright-holder>Tene, Vique L&#x00F3;pez, Garc&#x00ED;a Veloz, Rojas Oviedo and Tene-Fernandez</copyright-holder><license><ali:license_ref start_date="2026-01-09">https://creativecommons.org/licenses/by/4.0/</ali:license_ref><license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p></license>
</permissions>
<abstract><sec><title>Introduction</title>
<p>Artificial intelligence (AI) and extended reality (XR)&#x2014;including virtual, augmented, and mixed reality&#x2014;are increasingly adopted in health-professions education. However, the educational impact of AI, XR, and especially their combined use within integrated AI&#x2013;XR ecosystems remains incompletely characterized.</p>
</sec><sec><title>Objective</title>
<p>To synthesize empirical evidence on educational outcomes and implementation considerations for AI-, XR-, and combined AI&#x2013;XR&#x2013;based interventions in medical and health-professions education.</p>
</sec><sec><title>Methods</title>
<p>Following PRISMA and PICO guidance, we searched three databases (Scopus, PubMed, IEEE Xplore) and screened records using predefined eligibility criteria targeting empirical evaluations in health-professions education. After deduplication (336 records removed) and two-stage screening, 13 studies published between 2019 and 2024 were included. Data were extracted on learner population, clinical domain, AI/XR modality, comparators, outcomes, and implementation factors, and narratively synthesized due to heterogeneity in designs and measures.</p>
</sec><sec><title>Results</title>
<p>The 13 included studies involved undergraduate and postgraduate learners in areas such as procedural training, clinical decision-making, and communication skills. Only a minority explicitly integrated AI with XR within the same intervention; most evaluated AI-based or XR-based approaches in isolation. Across this mixed body of work, studies more often than not reported gains in at least one outcome&#x2014;knowledge or skills performance, task accuracy, procedural time, or learner engagement&#x2014;relative to conventional instruction, alongside generally high acceptability. Recurrent constraints included costs, technical reliability, usability, faculty readiness, digital literacy, and data privacy and ethics concerns.</p>
</sec><sec><title>Conclusions</title>
<p>Current evidence on AI, XR, and emerging AI&#x2013;XR integrations suggests promising but preliminary benefits for learning and performance. The small number of fully integrated AI&#x2013;XR interventions and the methodological limitations of many primary studies substantially limit the certainty and generalizability of these findings. Future research should use more rigorous and standardized designs, explicitly compare AI-only, XR-only, and AI&#x2013;XR hybrid approaches, and be coupled with faculty development, robust technical support, and alignment with competency-based assessment.</p>
</sec>
</abstract>
<kwd-group>
<kwd>artificial intelligence</kwd>
<kwd>ChatGPT</kwd>
<kwd>competency-based education</kwd>
<kwd>digital literacy</kwd>
<kwd>health education</kwd>
<kwd>prompt engineering</kwd>
<kwd>simulation</kwd>
</kwd-group><funding-group><award-group id="gs1"><funding-source id="sp1"><institution-wrap><institution>Universidad T&#x00E9;cnica Particular de Loja</institution><institution-id institution-id-type="doi" vocab="open-funder-registry" vocab-identifier="10.13039/open_funder_registry">10.13039/100019349</institution-id></institution-wrap></funding-source></award-group><funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work has been partially supported by Universidad T&#x00E9;cnica Particular de Loja under the grant No. POA_VIN-54.</funding-statement></funding-group><counts>
<fig-count count="6"/>
<table-count count="6"/><equation-count count="0"/><ref-count count="59"/><page-count count="18"/><word-count count="1110"/></counts><custom-meta-group><custom-meta><meta-name>section-at-acceptance</meta-name><meta-value>Connected Health</meta-value></custom-meta></custom-meta-group>
</article-meta>
</front>
<body><sec id="s1" sec-type="intro"><label>1</label><title>Introduction</title>
<p>Medical education is undergoing a paradigm shift. Traditional approaches, characterized by didactic lectures, static textbooks (<xref ref-type="bibr" rid="B1">1</xref>&#x2013;<xref ref-type="bibr" rid="B3">3</xref>), and episodic clinical rotations, increasingly struggle to meet the demands of a rapidly evolving healthcare system. Today, medical students must acquire extensive biomedical knowledge while developing clinical reasoning, communication, empathy (<xref ref-type="bibr" rid="B2">2</xref>, <xref ref-type="bibr" rid="B4">4</xref>), and procedural skills in settings constrained by time and patient safety. This growing complexity, together with the limits of conventional methods, has driven the search for more flexible, effective, and student-centered strategies (<xref ref-type="bibr" rid="B5">5</xref>).</p>
<p>Technological innovation has become a central driver of this transformation, with simulation platforms, e-learning modules, and digital assessment tools gaining widespread adoption (<xref ref-type="bibr" rid="B1">1</xref>, <xref ref-type="bibr" rid="B4">4</xref>). Among these, artificial intelligence (AI) and extended reality (XR) stand out as particularly disruptive and promising, offering new possibilities for training future healthcare professionals (<xref ref-type="bibr" rid="B5">5</xref>, <xref ref-type="bibr" rid="B6">6</xref>).</p>
<p>In medical education, AI underpins adaptive learning platforms, intelligent tutoring systems, virtual patient simulations, automated feedback mechanisms, and predictive analytics (<xref ref-type="bibr" rid="B7">7</xref>&#x2013;<xref ref-type="bibr" rid="B10">10</xref>), enabling individualized learning trajectories and targeted instruction. XR, encompassing virtual reality (VR), augmented reality (AR), and mixed reality (MR), provides immersive learning experiences that simulate real-life medical scenarios (<xref ref-type="bibr" rid="B8">8</xref>&#x2013;<xref ref-type="bibr" rid="B13">13</xref>). VR enables fully virtual environments (e.g., surgical procedures or emergency management), AR overlays digital information onto physical settings (<xref ref-type="bibr" rid="B14">14</xref>, <xref ref-type="bibr" rid="B15">15</xref>), and MR integrates real and virtual elements, allowing interaction with patients or holographic devices in real time (<xref ref-type="bibr" rid="B16">16</xref>). XR is increasingly used to teach anatomy, diagnosis, surgical skills, and interprofessional collaboration in safe, controlled, and repeatable environments (<xref ref-type="bibr" rid="B17">17</xref>).</p>
<p>The integration of AI and XR into medical education has been accelerated by several converging forces (<xref ref-type="bibr" rid="B19">19</xref>). The rapid maturation and commercialization of these technologies have made them more accessible and affordable for academic institutions (<xref ref-type="bibr" rid="B17">17</xref>&#x2013;<xref ref-type="bibr" rid="B19">19</xref>). The proliferation of consumer-grade VR headsets, cloud-based AI platforms, and open-source development tools has lowered implementation barriers (<xref ref-type="bibr" rid="B20">20</xref>, <xref ref-type="bibr" rid="B21">21</xref>). The COVID-19 pandemic further catalyzed digital transformation in education (<xref ref-type="bibr" rid="B22">22</xref>): social distancing and the suspension of in-person clinical training compelled educators to adopt alternative modalities for delivering content and assessing competencies (<xref ref-type="bibr" rid="B23">23</xref>). AI- and XR-based solutions offered continuity alongside interactivity, scalability, and data-rich feedback (<xref ref-type="bibr" rid="B9">9</xref>, <xref ref-type="bibr" rid="B21">21</xref>).</p>
<p>Evolving educational paradigms, including competency-based learning, flipped classrooms, and just-in-time training, align closely with the potential of smart and immersive technologies (<xref ref-type="bibr" rid="B24">24</xref>). Innovations that combine AI and XR can support real-time assessment of clinical decision-making, situated learning in realistic contexts, and the development of non-technical skills such as teamwork and communication (<xref ref-type="bibr" rid="B20">20</xref>, <xref ref-type="bibr" rid="B25">25</xref>).</p>
<p>Growing evidence indicates that AI and XR can enhance student engagement, knowledge retention, and skill acquisition at various stages of medical training (<xref ref-type="bibr" rid="B26">26</xref>). VR simulations have improved procedural accuracy and confidence among trainees (<xref ref-type="bibr" rid="B19">19</xref>), while AI-based tutoring systems provide personalized feedback that may accelerate learning curves. XR applications in anatomical training enable dynamic, three-dimensional exploration beyond what static cadaver dissection allows (<xref ref-type="bibr" rid="B23">23</xref>, <xref ref-type="bibr" rid="B27">27</xref>). However, high infrastructure costs, variable user acceptance, lack of faculty training, and institutional inertia hinder wider adoption (<xref ref-type="bibr" rid="B28">28</xref>). The use of AI raises ethical concerns around data privacy, algorithmic bias, and transparency, whereas XR platforms may induce motion sickness, demand substantial computational resources, and still fall short of the unpredictability of real-life clinical environments (<xref ref-type="bibr" rid="B29">29</xref>).</p>
<p>Existing research remains fragmented, often focusing on single technologies or small-scale implementations without evaluating long-term outcomes or cost-effectiveness (<xref ref-type="bibr" rid="B21">21</xref>, <xref ref-type="bibr" rid="B25">25</xref>). There is a clear need for an integrated understanding of how AI and XR intersect, complement, and challenge each other within medical education (<xref ref-type="bibr" rid="B30">30</xref>).</p>
<p>This study synthesizes and critically analyzes the literature on the integration of AI and XR in medical education between 2019 and 2024, identifying gaps in knowledge and practice and outlining directions for future research and implementation. This narrative review adopts a multidimensional lens on the pedagogical, technological, ethical, and institutional aspects of AI and XR in medical education, focusing on how AI-only, XR-only, and emerging AI&#x2013;XR interventions are currently used to support curriculum design, assessment, and preparation for clinical practice, rather than on head-to-head comparisons between the two modalities.</p>
</sec>
<sec id="s2"><label>2</label><title>Methodology</title>
<p>This review adopted a structured and transparent methodological approach to identify, assess, and synthesize the existing literature on the integration of AI and XR technologies in medical education. The review process was based on the principles of systematic review methodology and elements adapted from the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) framework, with the overall objective of retrieving empirical studies that report on the design, implementation, and evaluation of AI-, XR-, or combined AI&#x2013;XR&#x2013;based educational interventions in medical training settings (<xref ref-type="bibr" rid="B31">31</xref>, <xref ref-type="bibr" rid="B32">32</xref>). Data collection focused exclusively on original research articles that provided measurable educational outcomes, such as knowledge acquisition, skill development, cognitive performance, or student engagement. The literature search and selection process was conducted in four sequential stages (database identification, initial screening, eligibility assessment, and final inclusion for intervention analysis). These phases relied on well-defined inclusion and exclusion criteria to ensure relevance, methodological quality, and alignment with the review objectives, and they served as the basis for a thematic and comparative analysis of trends, technologies, pedagogical strategies, and reported impacts.</p>
<sec id="s2a"><label>2.1</label><title>Review design using PRISMA and PICO methodologies</title>
<p>To guide the methodological structure of this review, we used two widely recognized frameworks: PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) and PICO (Population, Intervention, Comparison, Outcome) (<xref ref-type="bibr" rid="B31">31</xref>). PRISMA was selected to promote clarity in literature selection and transparency in reporting (<xref ref-type="bibr" rid="B32">32</xref>), structuring the study identification and selection into four stages with specific steps at each phase.</p>
<p>The PICO methodology was chosen because it allows for the formulation of structured inclusion criteria and helps maintain thematic consistency among selected studies (<xref ref-type="bibr" rid="B33">33</xref>). It is particularly useful for identifying articles focused on interventions that align with the scope of the review. This PICO framing, including the research question, was defined <italic>a priori</italic> before data extraction, and guided both the construction of the search string and the subsequent screening process. The population considered included learners across the continuum of medical and health-professions education, namely undergraduate medical students, residents, and fellows in graduate medical education, and practicing clinicians engaged in continuing professional development. No limits were imposed on clinical specialty, provided that participants were enrolled in a structured educational or training activity.</p>
<p>Interventions were grouped into three categories: (i) AI-only educational applications (e.g., natural language processing tools, generative models, intelligent tutoring systems); (ii) XR-only applications (virtual, augmented, or mixed reality&#x2013;based simulations); and (iii) integrated AI&#x2013;XR interventions in which both components were used within the same educational activity. Comparators included traditional teaching methods and non-intervention conditions, and outcomes focused on learning effectiveness. Four outcome domains were defined <italic>a priori</italic>: knowledge (scores on written or structured theoretical tests), skills (performance on practical or procedural tasks), performance (objective indicators such as error rates, completion time, or simulator-derived scores), and engagement (self-reported or behavioural indicators of motivation and participation). The main assessment instruments for each study are summarized in <xref ref-type="table" rid="T5">Table&#x00A0;5</xref>, and the overall PICO framing in <xref ref-type="table" rid="T1">Table&#x00A0;1</xref>.</p>
<table-wrap id="T1" position="float"><label>Table&#x00A0;1</label>
<caption><p>PICO framework used to define eligibility criteria for included studies.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left"/>
<th valign="top" align="center">Element</th>
<th valign="top" align="center">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">P</td>
<td valign="top" align="left">Population</td>
<td valign="top" align="left">Medical students, healthcare trainees, and educators</td>
</tr>
<tr>
<td valign="top" align="left">I</td>
<td valign="top" align="left">Intervention</td>
<td valign="top" align="left">Educational use of AI, XR, or combined AI&#x2013;XR technologies (e.g., NLP, generative models, immersive VR/AR/MR simulations)</td>
</tr>
<tr>
<td valign="top" align="left">C</td>
<td valign="top" align="left">Comparison</td>
<td valign="top" align="left">Traditional instruction or no intervention</td>
</tr>
<tr>
<td valign="top" align="left">O</td>
<td valign="top" align="left">Outcome</td>
<td valign="top" align="left">Learning outcomes related to knowledge acquisition, skill development, task performance, and learner engagement.</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn id="TF1"><p>The model guided the identification of relevant literature focused on AI-, XR-, and combined AI&#x2013;XR&#x2013;based educational interventions and their impact on learning outcomes in medical and healthcare training contexts.</p></fn>
</table-wrap-foot>
</table-wrap>
<p>Using the PICO model, the research question was iteratively refined to align with the objectives of this review: to explore the educational impact of Artificial Intelligence and Extended Reality technologies in medical and healthcare training. The final research question guiding this review was:</p>
<p>&#x201C;<italic>In empirical studies published between 2019 and 2024, how have AI-only, XR-only, and combined AI&#x2013;XR technologies been used in medical and health-professions training, what learning outcomes have been reported, and what implementation challenges have been described, with specific attention to the limited number of fully integrated AI&#x2013;XR interventions?&#x201D;</italic></p>
<p>Given the limited number of fully integrated AI&#x2013;XR studies, we also retained AI-only and XR-only interventions to contextualize the emerging evidence base and identify design patterns for future AI&#x2013;XR ecosystem development. Guided by our research question and the PICO framework, we implemented a structured Boolean search organized into three concept clusters combined with AND&#x2014;(1) AI-related terms (e.g., artificial intelligence, natural language processing, generative AI), (2) medical and health-professions education and training, and (3) instructional modality (e-learning, simulation, virtual reality)&#x2014;with synonyms within each cluster combined with OR. The complete, database-specific search strings are summarized in <xref ref-type="table" rid="T2">Table&#x00A0;2</xref>.</p>
<table-wrap id="T2" position="float"><label>Table&#x00A0;2</label>
<caption><p>Database search results using structured Boolean queries focused on the intersection of AI and medical education, in the range 2019&#x2013;2024.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Database</th>
<th valign="top" align="center">Query</th>
<th valign="top" align="center">Results</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Scopus</td>
<td valign="top" align="left" rowspan="3">&#x201C;Artificial Intelligence&#x201D; OR &#x201C;Natural Language Processing&#x201D; OR &#x201C;Generative AI&#x201D; AND &#x201C;Medical Education&#x201D; OR &#x201C;Healthcare Training&#x201D; AND &#x201C;E-learning&#x201D; OR &#x201C;Simulation-based learning&#x201D; OR &#x201C;Virtual Reality&#x201D;</td>
<td valign="top" align="center">258</td>
</tr>
<tr>
<td valign="top" align="left">PubMed</td>
<td valign="top" align="center">84</td>
</tr>
<tr>
<td valign="top" align="left">IEEE Xplore</td>
<td valign="top" align="center">50</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s2b"><label>2.2</label><title>Database selection and search strategy</title>
<p>To ensure a comprehensive and multidisciplinary collection of articles, three major academic databases were selected for this review: SCOPUS, PubMed, and IEEE Xplore. Each database contributes uniquely to the intersection of healthcare, education, and emerging technologies, making them essential for capturing diverse research perspectives.
<list list-type="simple">
<list-item>
<p>SCOPUS was selected for its extensive indexing of peer-reviewed journals in health sciences, education, and engineering.</p></list-item>
<list-item>
<p>PubMed was selected for its rigorous selection of biomedical literature, including clinical and educational research specific to healthcare training.</p></list-item>
<list-item>
<p>IEEE Xplore was included to capture technical and engineering contributions to AI and simulation technologies, particularly those not indexed in traditional medical databases.</p></list-item>
</list>The Boolean formulation of main, topic-centric keywords (<xref ref-type="table" rid="T2">Table&#x00A0;2</xref>) was designed to encompass research at the intersection of AI technologies and immersive educational methods in healthcare, including simulation and virtual reality as core XR modalities. The study period was limited to 2019&#x2013;2024, a timeframe characterized by exponential advances in generative AI relevant to adaptive learning and intelligent tutoring systems, as well as the digital transformation catalyzed by the COVID-19 pandemic (2020-2022). This restriction was intended to capture the most up-to-date and relevant literature, including both early adoption studies and recent developments in the field.</p>
<p>The search strategy employed in these databases yielded a diverse set of studies spanning the technological, educational, and clinical dimensions of AI in medical education. Boolean queries were applied uniformly, and the results reflect the multidisciplinary scope of the field. <xref ref-type="table" rid="T2">Table&#x00A0;2</xref> presents a concise summary of the query structure and the number of articles retrieved from each source, providing a fundamental dataset for the screening and eligibility stages detailed in the following sections.</p>
<p>The volume and temporal evolution of these retrieved records provide insight into how interest in AI within medical education has developed over time. As shown in <xref ref-type="fig" rid="F1">Figure&#x00A0;1</xref>, there is a clear increase in the number of records identified in all three databases from 2020 onwards, with SCOPUS consistently reporting the highest annual counts, followed by PubMed and IEEE Xplore. This trend reflects broader growth in research on AI and immersive digital tools in medical education rather than the characteristics of the final 13 studies included in the review.</p>
<fig id="F1" position="float"><label>Figure&#x00A0;1</label>
<caption><p>Annual distribution of all records retrieved in SCOPUS, PubMed, and IEEE Xplore between 2019 and 2024 prior to screening, eligibility assessment, and final inclusion.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdgth-07-1740557-g001.tif"><alt-text content-type="machine-generated">Bar chart titled &#x201C;Yearly Distribution of Articles on AI in Medical Education&#x201D; from 2019 to 2024. SCOPUS (orange) shows a significant increase, peaking in 2024. PubMed (blue) and IEEE Xplore (green) have smaller increases, with notable rises in 2023 and 2024.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s2c"><label>2.3</label><title>Identification</title>
<p>The identification phase involved a systematic search of the three main databases mentioned above: SCOPUS (<italic>n</italic>&#x2009;&#x003D;&#x2009;258), PubMed (<italic>n</italic>&#x2009;&#x003D;&#x2009;84), and IEEE Xplore (<italic>n</italic>&#x2009;&#x003D;&#x2009;50). The search focused on literature specifically examining how AI technologies are integrated into teaching, learning, and assessment processes within medical education.</p>
<p><xref ref-type="fig" rid="F2">Figure&#x00A0;2</xref> shows how the initial search yielded 392 records. The search strategy was intentionally designed to be highly sensitive, in line with PRISMA-oriented guidance, to minimize the risk of missing empirical AI- and XR-based educational interventions. Consequently, many records identified at the identification stage were concept papers, narrative or systematic reviews, or clinical AI studies without an educational component, which were subsequently excluded during the eligibility assessment.</p>
<fig id="F2" position="float"><label>Figure&#x00A0;2</label>
<caption><p>PRISMA flowchart showing the selection process of studies on AI in medical education across four phases: identification, screening, eligibility, and inclusion.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdgth-07-1740557-g002.tif"><alt-text content-type="machine-generated">Flowchart showing the selection process for articles. Initial sources include SCOPUS (258), PubMed (84), and IEEE Xplore (50), totaling 392. After removing 336 duplicates, 190 articles were screened. Exclusions include 93 for book-related content, 146 lacking keywords or abstracts, 3 unrelated to AI and Medicine, 67 unrelated to Education and Training, and 14 review articles, leaving 13 articles for intervention analysis.</alt-text>
</graphic>
</fig>
<p>After removing duplicates, 336 records remained. During title/abstract screening, records lacking essential metadata (e.g., abstracts or keywords) were excluded (<italic>n</italic>&#x2009;&#x003D;&#x2009;146), leaving 190 records for full-text review. At full-text screening, non-peer-reviewed formats (books, book chapters, conference papers, letters, and editorials) were excluded (<italic>n</italic>&#x2009;&#x003D;&#x2009;93), yielding 97 eligible articles. Finally, we excluded 3 studies not related to AI and Medicine, 67 not related to Education/Training, and 14 review articles, resulting in 13 studies included in the intervention analysis.</p>
</sec>
<sec id="s2d"><label>2.4</label><title>Screened</title>
<p>After removing duplicates, 336 records were available for the initial assessment. The goal of the screening phase (title/abstract screening) was to retain only records clearly relevant to the application of AI in medical and health-professions education for full-text eligibility. Each record was checked for title, abstract, indexed keywords, and basic bibliographic completeness.</p>
<p>Records lacking essential metadata needed to judge relevance&#x2014;principally abstracts and/or indexed keywords (a frequent issue in technical databases such as IEEE Xplore)&#x2014;were excluded.</p>
<p>All exclusions were logged to ensure transparency and reproducibility. In total, 146 records were excluded at this stage, leaving 190 records for full-text eligibility assessment. The outcomes of the screening step are summarized in <xref ref-type="fig" rid="F2">Figure&#x00A0;2</xref>.</p>
</sec>
<sec id="s2e"><label>2.5</label><title>Eligibility</title>
<p>The eligibility process involved the exclusion of 93 records based on publication type and source format. The excluded articles included books, book chapters, conference proceedings, editorials, letters, and commentaries, which, during the full-text review, were identified as not contributing information to the inclusion criteria. Recognizing that some of these sources provided important conceptual descriptions or theoretical perspectives on AI and its potential applications in medical education, they were rejected due to their preliminary nature, which often limits methodological detail and omits follow-up results or student assessment data.</p>
<p>In the case of editorials and letters, while sometimes relevant to ongoing debates in the field, they were also excluded due to their opinion-based structure and lack of data-driven findings. Books and book chapters, although often comprehensive, present challenges related to accessibility, inconsistent peer review standards, and variability in how interventions and outcomes are reported.</p>
<p>The exclusion of these 93 articles was necessary to preserve the analytical integrity and focus of the review, maintaining the focus on information extraction specifically in original articles. Therefore, only full-text empirical studies were eligible for further analysis. After this filtering process, 97 full-text articles were retained, which constitute the eligibility stage, where their relevance to AI applications in medical education and the presence of measurable learning outcomes were further assessed.</p>
</sec>
<sec id="s2f"><label>2.6</label><title>Inclusion</title>
<p>Of the 97 full-text articles assessed for eligibility, 13 studies were included in the final analysis. The selection of this resulting number of studies for inclusion was the result of a rigorous three-stage selection process, designed to isolate only those studies that met all thematic, methodological, and empirical criteria related to the central focus of the review: the use of AI in medical education.</p>
<p>The first stage of exclusion focused on articles unrelated to AI and medicine, of which 3 were identified, leaving 94. The eliminated studies were related to AI in a general or metaphorical sense, but did not involve actual AI systems, models, or algorithms in medical education contexts and focused on training in disciplines not directly related to medicine.</p>
<p>In Stage 2 of the eligibility assessment, we excluded technical and clinical journal articles that addressed the development or validation of AI systems for diagnosis, risk prediction, or clinical workflow optimization, but that did not involve students or evaluate educational interventions and therefore did not align with the review objective of synthesizing evidence on AI-based teaching and learning practices. These records were coded as having no AI-based educational applications because they did not report implemented AI, XR, or combined AI&#x2013;XR tools used in teaching, learning, assessment, or simulation for health-professions learners. Sixty-seven articles were excluded at this stage, leaving 27 studies for the final filter.</p>
<p>To maintain the focus on original and empirical research, review articles, both narrative and systematic, were then excluded from the dataset. Systematic, scoping, and narrative reviews were therefore treated as non-eligible study designs and were used only for contextual background and to cross-check whether our search had missed primary studies (<italic>n</italic>&#x2009;&#x003D;&#x2009;14 excluded). While informative, these articles summarized existing literature rather than presenting new data or intervention results. The exclusion of these studies means that the final synthesis is based solely on primary studies with defined populations, interventions, and measurable learning outcomes.</p>
<p>The 13 articles ultimately selected constitute the inclusion stage. These are peer-reviewed, intervention-based research on the applications of AI in medical education, which included a quantitative or qualitative assessment of the educational impact.</p>
<p>In terms of study design, the presence of a control group was not used as an inclusion criterion. Many AI- and XR-based educational interventions are still reported as pilot or feasibility work and therefore lack formal comparators. During data extraction, the presence and type of comparator or control condition were recorded for each study and, when applicable, classified into one of four categories: (i) no formal comparator (single-group or pre&#x2013;post design), (ii) traditional instruction or standard curriculum, (iii) alternative technology-enhanced instruction without AI or immersive XR, and (iv) historical or convenience cohorts.</p>
</sec>
<sec id="s2g"><label>2.7</label><title>Quality appraisal</title>
<p>All included studies underwent a structured quality appraisal to assess the robustness of their design and reporting. Each article was evaluated using design-appropriate critical appraisal checklists (e.g., tools for randomized and quasi-experimental studies, observational designs, and qualitative research). When assessments differed, they were reconciled through discussion until consensus was reached. The appraisal focused on key domains such as study design and setting, sampling strategy and sample size justification, clarity and validity of outcome measures, handling of confounding factors, and transparency of data analysis and reporting. Given the heterogeneity of study designs and outcomes, we did not compute a pooled quality score. Instead, studies were categorized qualitatively as offering lower, moderate, or higher methodological robustness, and these judgements informed the synthesis presented in the &#x201C;Methodological Considerations and Quality of Evidence&#x201D; section.</p>
</sec>
</sec>
<sec id="s3" sec-type="results"><label>3</label><title>Results</title>
<p>The 13 selected studies detail how AI and XR have been implemented in medical and health-professions education, focusing on their effects on learning outcomes, implementation limitations, faculty preparedness, and ethical considerations.</p>
<p>Of these studies, only a small subset explicitly combined AI components with XR environments within a single educational intervention. Most articles evaluated either AI-based tools (e.g., generative AI, machine-learning&#x2013;driven analytics, intelligent tutoring) or XR-based simulations without an AI layer. The synthesis therefore distinguishes, where possible, between AI-only, XR-only, and emerging integrated AI&#x2013;XR designs.</p>
<sec id="s3a"><label>3.1</label><title>Analysis of the educational impact of the use of AI and XR in medicine</title>
<p><xref ref-type="table" rid="T3">Table&#x00A0;3</xref> presents an overview of the studies, their variables, methodologies, and sample sizes. Five studies focused on engagement, defined as the emotional and cognitive involvement of learners in learning activities. These interventions often used immersive or personalized environments to increase motivation and attention. Khan et al. (<xref ref-type="bibr" rid="B34">34</xref>) surveyed 406 healthcare professionals and found widespread enthusiasm for XR-based training environments. However, the cross-sectional design limited causal inference. Similarly, Tolentino et al. (<xref ref-type="bibr" rid="B38">38</xref>) explored how AI and VR were perceived in virtual interviews, with results indicating adaptability and a positive reception among 112 participants. In the study by Borakati et al. (<xref ref-type="bibr" rid="B45">45</xref>), the authors demonstrated the reach of an international e-learning platform with NLP/ML support, with over 1,600 participants. Despite the large sample size, the study acknowledged limitations in assessing long-term impact. Gabr et al. (<xref ref-type="bibr" rid="B46">46</xref>) analyzed radiology training during the COVID-19 pandemic, highlighting the role of virtual platforms in maintaining student engagement. Artemiou et al. (<xref ref-type="bibr" rid="B36">36</xref>), while focusing on communication skills, reported that AI-generated case simulations increased student participation due to their interactivity and realism.</p>
<table-wrap id="T3" position="float"><label>Table&#x00A0;3</label>
<caption><p>Summary of the studies and their intervention, educational variable evaluated, technology used, stage of development and sample size.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Study number</th>
<th valign="top" align="center">Intervention</th>
<th valign="top" align="center">Variable</th>
<th valign="top" align="center">Limitations</th>
<th valign="top" align="center">Stage of employed technology</th>
<th valign="top" align="center">Technology type</th>
<th valign="top" align="center">Number of participants</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Khan et al. (<xref ref-type="bibr" rid="B34">34</xref>)</td>
<td valign="top" align="left">Survey of healthcare professionals&#x2019; perceptions of XR adoption</td>
<td valign="top" align="left">Engagement</td>
<td valign="top" align="left">Cross-sectional design limits generalizability</td>
<td valign="top" align="left">Study</td>
<td valign="top" align="left">Not reported</td>
<td valign="top" align="center">406</td>
</tr>
<tr>
<td valign="top" align="left">&#x00A0;Bonfitto et al. (<xref ref-type="bibr" rid="B35">35</xref>)</td>
<td valign="top" align="left">ChatGPT dialogues simulate radiographer&#x2013;patient interactions for claustrophobia</td>
<td valign="top" align="left">Skill development</td>
<td valign="top" align="left">Small pilot, ChatGPT not designed for simulation</td>
<td valign="top" align="left">Pilot</td>
<td valign="top" align="left">ChatGPT (GPT-3.5 &#x0026; GPT-4)</td>
<td valign="top" align="center">6</td>
</tr>
<tr>
<td valign="top" align="left">Artemiou et al. (<xref ref-type="bibr" rid="B36">36</xref>)</td>
<td valign="top" align="left">AI-generated cases &#x0026; standardized clients for veterinary communication training</td>
<td valign="top" align="left">Skill development</td>
<td valign="top" align="left">Single institution, limited familiarity</td>
<td valign="top" align="left">Study</td>
<td valign="top" align="left">ChatGPT-3.5</td>
<td valign="top" align="center">237</td>
</tr>
<tr>
<td valign="top" align="left">Prevezanou et al. (<xref ref-type="bibr" rid="B37">37</xref>)</td>
<td valign="top" align="left">ML models classify progress on laparoscopic VR simulator tasks</td>
<td valign="top" align="left">Performance</td>
<td valign="top" align="left">Limited kinematic data and subjective labels</td>
<td valign="top" align="left">Development</td>
<td valign="top" align="left">Machine learning algorithms</td>
<td valign="top" align="center">23</td>
</tr>
<tr>
<td valign="top" align="left">Tolentino et al. (<xref ref-type="bibr" rid="B38">38</xref>)</td>
<td valign="top" align="left">Survey on virtual interviews and AI/VR integration in residency programs</td>
<td valign="top" align="left">Engagement</td>
<td valign="top" align="left">Single-institution cross-sectional design</td>
<td valign="top" align="left">Study</td>
<td valign="top" align="left">Not reported</td>
<td valign="top" align="center">112</td>
</tr>
<tr>
<td valign="top" align="left">Latour et al. (<xref ref-type="bibr" rid="B39">39</xref>)</td>
<td valign="top" align="left">VASN features aid FESS simulation training for otolaryngology trainees</td>
<td valign="top" align="left">Performance</td>
<td valign="top" align="left">Small sample &#x0026; simulation environment</td>
<td valign="top" align="left">Study</td>
<td valign="top" align="left">Virtually Augmented Surgical Navigation</td>
<td valign="top" align="center">15</td>
</tr>
<tr>
<td valign="top" align="left">Real et al. (<xref ref-type="bibr" rid="B40">40</xref>)</td>
<td valign="top" align="left">VR curriculum with didactics &#x0026; simulations for pediatric residents</td>
<td valign="top" align="left">Skill development</td>
<td valign="top" align="left">Small sample &#x0026; single institution</td>
<td valign="top" align="left">Pilot</td>
<td valign="top" align="left">Virtual Reality simulation</td>
<td valign="top" align="center">55</td>
</tr>
<tr>
<td valign="top" align="left">Krive et al. (<xref ref-type="bibr" rid="B41">41</xref>)</td>
<td valign="top" align="left">Four-week AI course integrating evidence-based medicine &#x0026; clinical topics</td>
<td valign="top" align="left">Skill development</td>
<td valign="top" align="left">Small cohorts &#x0026; single institution</td>
<td valign="top" align="left">Study</td>
<td valign="top" align="left">General AI instruction</td>
<td valign="top" align="center">20</td>
</tr>
<tr>
<td valign="top" align="left">Mergen et al. (<xref ref-type="bibr" rid="B42">42</xref>)</td>
<td valign="top" align="left">Development of AI-driven VR platform for virtual patients (medical tr.AI.ning)</td>
<td valign="top" align="left">Skill development</td>
<td valign="top" align="left">Development stage&#x2014;no evaluation</td>
<td valign="top" align="left">Development</td>
<td valign="top" align="left">AI-driven VR platform</td>
<td valign="top" align="center">Not reported</td>
</tr>
<tr>
<td valign="top" align="left">Tsopra et al. (<xref ref-type="bibr" rid="B43">43</xref>)</td>
<td valign="top" align="left">Elective program where students design AI clinical decision support systems</td>
<td valign="top" align="left">Skill development</td>
<td valign="top" align="left">Small elective &#x0026; limited generalizability</td>
<td valign="top" align="left">Study</td>
<td valign="top" align="left">AI-CDSS design</td>
<td valign="top" align="center">15</td>
</tr>
<tr>
<td valign="top" align="left">Andersen et al. (<xref ref-type="bibr" rid="B44">44</xref>)</td>
<td valign="top" align="left">Interactive online platform (VIOLA) for diabetic retinopathy training</td>
<td valign="top" align="left">Skill development</td>
<td valign="top" align="left">Limited to regional adoption</td>
<td valign="top" align="left">Study</td>
<td valign="top" align="left">Online learning platform</td>
<td valign="top" align="center">150</td>
</tr>
<tr>
<td valign="top" align="left">Borakati et al. (<xref ref-type="bibr" rid="B45">45</xref>)</td>
<td valign="top" align="left">International e-learning course evaluated with NLP &#x0026; ML</td>
<td valign="top" align="left">Engagement</td>
<td valign="top" align="left">Limited generalizability &#x0026; outcome measures</td>
<td valign="top" align="left">Study</td>
<td valign="top" align="left">E-learning with NLP/ML</td>
<td valign="top" align="center">1611</td>
</tr>
<tr>
<td valign="top" align="left">Gabr et al. (<xref ref-type="bibr" rid="B46">46</xref>)</td>
<td valign="top" align="left">Impact analysis of radiology case volumes &#x0026; remote education during pandemics</td>
<td valign="top" align="left">Engagement</td>
<td valign="top" align="left">Observational and single-institution data</td>
<td valign="top" align="left">Study</td>
<td valign="top" align="left">Virtual learning platforms</td>
<td valign="top" align="center">Not reported</td>
</tr>
</tbody>
</table>
</table-wrap>
<p><xref ref-type="fig" rid="F3">Figure&#x00A0;3</xref> details that engagement-oriented studies represented 38&#x0025; of the sample. These findings suggest that AI and XR can effectively foster student participation, especially when traditional formats are disrupted or inaccessible. Skills development emerged as the most frequently assessed educational variable, appearing in seven of the 13 studies (54&#x0025;). Bonfitto et al. (<xref ref-type="bibr" rid="B35">35</xref>) conducted a pilot study on the use of ChatGPT to simulate conversations between radiology technicians and claustrophobic patients. Although the sample size was limited (<italic>n</italic>&#x2009;&#x003D;&#x2009;6), the study demonstrated the potential for developing communication skills through AI-generated dialogue. Artemiou et al. (<xref ref-type="bibr" rid="B36">36</xref>) used standardized AI-generated cases in veterinary training (<italic>n</italic>&#x2009;&#x003D;&#x2009;237) and reported improvements in confidence and fluency during client interactions.</p>
<fig id="F3" position="float"><label>Figure&#x00A0;3</label>
<caption><p>Distribution of the reviewed studies according to the main educational variable: skills development, engagement and performance.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdgth-07-1740557-g003.tif"><alt-text content-type="machine-generated">Pie chart titled &#x201C;Variable&#x201D; showing three segments: Performance at 15.4%, Engagement at 30.8%, and Skill Development at 53.8%. Performance is the smallest segment, and Skill Development is the largest.</alt-text>
</graphic>
</fig>
<p>Other studies, such as Real et al. (<xref ref-type="bibr" rid="B40">40</xref>), employed a virtual reality simulation program for pediatric residents, combining theory with practice. In this study, participants showed improved task performance, although the institutional scope was limited. In another application, Krive et al. (<xref ref-type="bibr" rid="B41">41</xref>) offered a four-week AI course that integrated clinical and evidence-based medicine content, fostering critical thinking despite small group sizes. Tsopra et al. (<xref ref-type="bibr" rid="B43">43</xref>) encouraged students to design their own clinical decision support systems, integrating AI tools into the curriculum design: a promising approach to skills development through hands-on experience. Andersen et al. (<xref ref-type="bibr" rid="B44">44</xref>) developed the VIOLA platform for diagnosing diabetic retinopathy, training 150 participants via an online interface. Finally, Mergen et al. (<xref ref-type="bibr" rid="B42">42</xref>) described an AI-powered VR platform, currently under development, and proposed future training in virtual patient encounters.</p>
<p>Only two studies (15&#x0025;) explicitly measured performance, defined as the demonstration of competence in clinical tasks or simulations. Prevezanou et al. (<xref ref-type="bibr" rid="B37">37</xref>) used machine learning models to classify students&#x0027; progress in laparoscopic virtual reality simulations. Despite the innovative analysis, the small sample size (<italic>n</italic>&#x2009;&#x003D;&#x2009;23) and reliance on subjective classification limited the robustness of the conclusions. Latour et al. (<xref ref-type="bibr" rid="B39">39</xref>) tested Virtually Augmented Surgical Navigation (VASN) in otolaryngology simulation. The intervention improved performance in surgical planning but was limited by the sample size (<italic>n</italic>&#x2009;&#x003D;&#x2009;15) and simulation fidelity. As shown in <xref ref-type="fig" rid="F4">Figure&#x00A0;4</xref>, performance remains the least explored outcome in the current AI/XR education literature.</p>
<fig id="F4" position="float"><label>Figure&#x00A0;4</label>
<caption><p>Studies reviewed by technology implementation stage: study, pilot, and development phases.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdgth-07-1740557-g004.tif"><alt-text content-type="machine-generated">Pie chart titled &#x201C;Stage of Employed Technology&#x201D; showing three segments: Development at 69.2%, Pilot at 15.4%, and Study at 15.4%. Each segment is represented in different shades of gray.</alt-text>
</graphic>
</fig>
<p>Two studies were identified as pilot initiatives, representing initial experiments to explore feasibility and usability: Bonfitto et al. (<xref ref-type="bibr" rid="B35">35</xref>) conducted a pilot study on the use of ChatGPT for radiologist and patient simulations, and Real et al. (<xref ref-type="bibr" rid="B40">40</xref>) implemented a VR pilot program. Two other studies were in the development stage, where no outcome assessment had been performed; for example, Mergen et al. (<xref ref-type="bibr" rid="B42">42</xref>) proposed an AI-powered VR platform but did not present student data. The existence of such nascent projects highlights the innovative nature of this research area but also points to the need for more rigorous validation frameworks.</p>
<p>The distribution shown in <xref ref-type="fig" rid="F4">Figure&#x00A0;4</xref> reflects a steady flow of innovation, progressing gradually from exploratory design to empirical validation. However, the relatively low number of pilot and development projects could imply a publication bias toward studies with proven technologies, which might overlook the value of documenting conceptual innovations or prototypes.</p>
</sec>
<sec id="s3b"><label>3.2</label><title>Observed effects, challenges, and methodological contexts</title>
<p>In general, studies consistently documented improvements in educational performance, student confidence, and skill acquisition. Khan et al. (<xref ref-type="bibr" rid="B34">34</xref>) conducted a cross-sectional survey in Pakistan to assess perceptions and readiness for integrating XR into medical education. The results indicated that 83.8&#x0025; of participants believed XR could effectively improve the quality of education and patient care, observing statistically significant correlations between familiarity with XR and favorable attitudes toward its use in diagnostic and surgical procedures.</p>
<p><xref ref-type="table" rid="T4">Table&#x00A0;4</xref> summarizes the observed intervention effects together with study design and comparator. In this review, effects were classified as &#x201C;positive&#x201D; when the original study reported improvement in at least one predefined learning outcome (knowledge acquisition, skill development, performance, or engagement), either relative to baseline in pre&#x2013;post designs or compared with a control or alternative condition, using the significance criteria specified by the authors. Outcomes were labeled as &#x201C;mixed/neutral&#x201D; when no clear improvement was observed or when findings differed across domains.</p>
<table-wrap id="T4" position="float"><label>Table&#x00A0;4</label>
<caption><p>Summary of included studies: study design, comparator/control condition, and observed intervention effects across predefined learning outcomes.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Study number</th>
<th valign="top" align="center">Effect</th>
<th valign="top" align="center">Barriers/challenges</th>
<th valign="top" align="center">Study design</th>
<th valign="top" align="center">Comparator/control condition</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Khan et al. (<xref ref-type="bibr" rid="B34">34</xref>)</td>
<td valign="top" align="left">Positive</td>
<td valign="top" align="left">Technological, privacy and training barriers</td>
<td valign="top" align="left">Cross-sectional survey</td>
<td valign="top" align="left">None (no comparator; single-group cross-sectional survey)</td>
</tr>
<tr>
<td valign="top" align="left">Bonfitto et al. (<xref ref-type="bibr" rid="B35">35</xref>)</td>
<td valign="top" align="left">Positive</td>
<td valign="top" align="left">AI bias and model differences</td>
<td valign="top" align="left">Pilot case study</td>
<td valign="top" align="left">None (single-group pilot; no educational control group)</td>
</tr>
<tr>
<td valign="top" align="left">Artemiou et al. (<xref ref-type="bibr" rid="B36">36</xref>)</td>
<td valign="top" align="left">Positive</td>
<td valign="top" align="left">Data privacy, unclear guidelines, low literacy</td>
<td valign="top" align="left">Descriptive study</td>
<td valign="top" align="left">None (single-group communication lab; no comparator group)</td>
</tr>
<tr>
<td valign="top" align="left">Prevezanou et al. (<xref ref-type="bibr" rid="B37">37</xref>)</td>
<td valign="top" align="left">Positive</td>
<td valign="top" align="left">Restricted data access and sensors</td>
<td valign="top" align="left">Developmental study</td>
<td valign="top" align="left">None (single-group VR training curriculum; no external comparator)</td>
</tr>
<tr>
<td valign="top" align="left">Tolentino et al. (<xref ref-type="bibr" rid="B38">38</xref>)</td>
<td valign="top" align="left">Positive</td>
<td valign="top" align="left">Response bias and limited generalizability</td>
<td valign="top" align="left">Cross-sectional survey</td>
<td valign="top" align="left">None (no comparator; observational survey of AI education initiatives)</td>
</tr>
<tr>
<td valign="top" align="left">Latour et al. (<xref ref-type="bibr" rid="B39">39</xref>)</td>
<td valign="top" align="left">Positive</td>
<td valign="top" align="left">Technology complexity</td>
<td valign="top" align="left">Prospective trial</td>
<td valign="top" align="left">Within-subject repeated-measures design (participants serve as their own control across VASN sessions)</td>
</tr>
<tr>
<td valign="top" align="left">Real et al. (<xref ref-type="bibr" rid="B40">40</xref>)</td>
<td valign="top" align="left">Positive</td>
<td valign="top" align="left">Cost and resource limitations</td>
<td valign="top" align="left">Randomized controlled pilot trial</td>
<td valign="top" align="left">Active control group&#x2014;alternative VR simulation (respiratory-distress scenario; intervention vs. control VR curriculum)</td>
</tr>
<tr>
<td valign="top" align="left">Krive et al. (<xref ref-type="bibr" rid="B41">41</xref>)</td>
<td valign="top" align="left">Positive</td>
<td valign="top" align="left">Faculty expertise and funding limitations</td>
<td valign="top" align="left">Educational innovation</td>
<td valign="top" align="left">None (single-group curriculum implementation; no separate control group)</td>
</tr>
<tr>
<td valign="top" align="left">Mergen et al. (<xref ref-type="bibr" rid="B42">42</xref>)</td>
<td valign="top" align="left">Increased</td>
<td valign="top" align="left">Technical complexity and integration</td>
<td valign="top" align="left">Development project</td>
<td valign="top" align="left">Not applicable (development project; no learner cohort or control group)</td>
</tr>
<tr>
<td valign="top" align="left">Tsopra et al. (<xref ref-type="bibr" rid="B43">43</xref>)</td>
<td valign="top" align="left">Positive</td>
<td valign="top" align="left">Logistical challenges</td>
<td valign="top" align="left">Elective course evaluation</td>
<td valign="top" align="left">None (single-group elective course evaluation; no comparator group)</td>
</tr>
<tr>
<td valign="top" align="left">Andersen et al. (<xref ref-type="bibr" rid="B44">44</xref>)</td>
<td valign="top" align="left">Positive</td>
<td valign="top" align="left">Need robust digital infrastructure</td>
<td valign="top" align="left">Case study/implementation</td>
<td valign="top" align="left">None (single-group implementation; no comparator group)</td>
</tr>
<tr>
<td valign="top" align="left">Borakati et al. (<xref ref-type="bibr" rid="B45">45</xref>)</td>
<td valign="top" align="left">Positive</td>
<td valign="top" align="left">Digital infrastructure &#x0026; language barriers</td>
<td valign="top" align="left">Cross-sectional evaluation</td>
<td valign="top" align="left">None (single-group international e-learning evaluation; no comparator group)</td>
</tr>
<tr>
<td valign="top" align="left">Gabr et al. (<xref ref-type="bibr" rid="B46">46</xref>)</td>
<td valign="top" align="left">Increased</td>
<td valign="top" align="left">Reduced case volumes &#x0026; remote challenges</td>
<td valign="top" align="left">Observational analysis</td>
<td valign="top" align="left">Historical control&#x2014;comparison with pre-pandemic case volumes from prior years</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Regarding study designs and comparators, we observed a predominance of single-group or pre&#x2013;post evaluations. Only a minority of studies implemented a formal control group, typically based on traditional lectures or standard skills training, while a smaller subset compared AI or XR interventions against other digital resources or simulation modalities. The type of comparator used in each study is summarized in the &#x201C;Comparator/control condition&#x201D; column of <xref ref-type="table" rid="T4">Table&#x00A0;4</xref>.</p>
<p>Similarly, a randomized controlled pilot trial (<xref ref-type="bibr" rid="B40">40</xref>) demonstrated that residents participating in a VR-based behavioral health training module showed statistically significant improvements in motivational interviewing (MI) and behavioral health anticipatory guidance (BHAG) skills. These improvements were quantitatively supported by a higher frequency of open-ended questions and MI-aligned behaviors, illustrating the effectiveness of immersive simulation as a teaching tool.</p>
<p>Another noteworthy contribution comes from (<xref ref-type="bibr" rid="B35">35</xref>), who used ChatGPT-3.5 and ChatGPT-4 to simulate interactions with patients in a radiographic environment. This pilot case study showed a 96.7&#x0025; success rate in managing virtual patients experiencing claustrophobia during magnetic resonance imaging (MRI), with communication styles adapted to the different experience levels of the radiologic technologists. This innovative application of LLM models adds a new dimension to preclinical training by reinforcing communication skills between students and patients.</p>
<p><xref ref-type="fig" rid="F5">Figure&#x00A0;5a</xref> shows that 11 of the 13 studies (84.6&#x0025;) reported a positive impact on learning or teaching delivery, whereas the remaining 2 (15.4&#x0025;) reported an &#x201C;increased&#x201D; effect. <xref ref-type="fig" rid="F5">Figure&#x00A0;5b</xref> shows the varied geographical distribution of the studies, with contributions from North America (<italic>n</italic>&#x2009;&#x003D;&#x2009;4), Europe (<italic>n</italic>&#x2009;&#x003D;&#x2009;4), Asia (<italic>n</italic>&#x2009;&#x003D;&#x2009;1), and one multicenter study. Four studies did not specify their location, which limited a more detailed geographical analysis. The United States had the largest representation (<italic>n</italic>&#x2009;&#x003D;&#x2009;3), with studies focusing on innovative applications such as digital radiology training during COVID-19 (<xref ref-type="bibr" rid="B46">46</xref>) and competency-based AI training (<xref ref-type="bibr" rid="B41">41</xref>). The multicenter evaluation in (<xref ref-type="bibr" rid="B45">45</xref>) revealed the scalability of e-learning platforms and explored the use of natural language processing to analyze user feedback in 24 countries, providing insights into the global applicability of AI-enhanced learning.</p>
<fig id="F5" position="float"><label>Figure&#x00A0;5</label>
<caption><p><bold>(a)</bold> distribution of the educational effects reported in the selected studies. <bold>(b)</bold> Geographic distribution of the studies by country, including multiple locations and cases with unspecified origin.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdgth-07-1740557-g005.tif"><alt-text content-type="machine-generated">Bar chart and world map comparing study effects and distribution. Chart shows ten positive and two increased studies. Map highlights study locations; North America has three, Europe five, and Asia one. Two studies are not location-specific.</alt-text>
</graphic>
</fig>
<p>Despite the positive results in the vast majority of studies, almost all reported significant barriers hindering smooth adoption. <xref ref-type="table" rid="T4">Table&#x00A0;4</xref> shows technological complexity and system integration problems detailed in (<xref ref-type="bibr" rid="B39">39</xref>) and (<xref ref-type="bibr" rid="B42">42</xref>), as well as concerns about data privacy and a lack of regulatory clarity observed in studies (<xref ref-type="bibr" rid="B34">34</xref>) and (<xref ref-type="bibr" rid="B36">36</xref>). Another key barrier, according to (<xref ref-type="bibr" rid="B41">41</xref>) and (<xref ref-type="bibr" rid="B45">45</xref>), is inadequate infrastructure and training for carrying out study activities, compounded, as in the case of (<xref ref-type="bibr" rid="B40">40</xref>), by high costs and resource limitations.</p>
<p>A recurring theme in various studies is the ethical and legal ambiguity associated with the use of AI and XR in educational settings. Study (<xref ref-type="bibr" rid="B34">34</xref>) identified privacy concerns as one of the main barriers (mentioned by 63.9&#x0025; of participants), along with technological underdevelopment (70&#x0025;) and insufficient XR training among healthcare professionals (45.8&#x0025;). Study (<xref ref-type="bibr" rid="B36">36</xref>), which focused on the use of AI in veterinary education, observed that upper-level students showed resistance due to uncertainty and a lack of confidence in AI tools, pointing to a systemic need for early training in AI digital literacy. Similarly, study (<xref ref-type="bibr" rid="B43">43</xref>), through its elective AI-CDSS course implemented in France, reported logistical barriers to implementation, although student participation remained high.</p>
</sec>
<sec id="s3c"><label>3.3</label><title>Areas of medical education, training modalities, and implementation environments</title>
<p><xref ref-type="table" rid="T5">Table&#x00A0;5</xref> shows that current evidence reveals a dynamic integration of AI and immersive technologies across a wide range of medical education settings; these innovations offer promising strategies for skills acquisition, communication training, and clinical reasoning. As shown in <xref ref-type="fig" rid="F6">Figure&#x00A0;6</xref>, general medical education represented the most common area of focus, encompassing almost half of the reviewed studies, a total of 6. This finding reflects a growing recognition of the critical role that digital health literacy plays in modern clinical practice. Generalist programs, such as those implemented by (<xref ref-type="bibr" rid="B34">34</xref>), (<xref ref-type="bibr" rid="B41">41</xref>), and (<xref ref-type="bibr" rid="B43">43</xref>), frequently addressed key concepts of AI, clinical decision support systems (CDSS), telemedicine, and data ethics. On the other hand, the study by (<xref ref-type="bibr" rid="B41">41</xref>) presented a four-week interactive course that used clinical practice simulations and online modules to teach medical students the application of AI in patient care. This course demonstrated a high level of participation and knowledge acquisition, underscoring the importance of integrating AI literacy from the early stages of medical curricula.</p>
<table-wrap id="T5" position="float"><label>Table&#x00A0;5</label>
<caption><p>Area of medical education, duration of the intervention, assessment tools, training modalities and implementation contexts.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Study number</th>
<th valign="top" align="center">Area</th>
<th valign="top" align="center">Duration of 
intervention</th>
<th valign="top" align="center">Assessment tools</th>
<th valign="top" align="center">Training provided</th>
<th valign="top" align="center">Context/setting</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Khan et al. (<xref ref-type="bibr" rid="B34">34</xref>)</td>
<td valign="top" align="left">Medical General</td>
<td valign="top" align="left">Two-month survey</td>
<td valign="top" align="left">Online questionnaire</td>
<td valign="top" align="left">None</td>
<td valign="top" align="left">Healthcare community across Pakistan</td>
</tr>
<tr>
<td valign="top" align="left">Bonfitto et al. (<xref ref-type="bibr" rid="B35">35</xref>)</td>
<td valign="top" align="left">Radiology</td>
<td valign="top" align="left">Not reported</td>
<td valign="top" align="left">Simulation success and error rates</td>
<td valign="top" align="left">Not reported</td>
<td valign="top" align="left">Radiography education (MRI)</td>
</tr>
<tr>
<td valign="top" align="left">Artemiou et al. (<xref ref-type="bibr" rid="B36">36</xref>)</td>
<td valign="top" align="left">Veterinary Medicine</td>
<td valign="top" align="left">90-minute lab</td>
<td valign="top" align="left">Questionnaire and perceptions survey</td>
<td valign="top" align="left">30-min theory&#x002B;60-min practice</td>
<td valign="top" align="left">Veterinary school communication lab</td>
</tr>
<tr>
<td valign="top" align="left">Prevezanou et al. (<xref ref-type="bibr" rid="B37">37</xref>)</td>
<td valign="top" align="left">Surgery</td>
<td valign="top" align="left">Not reported</td>
<td valign="top" align="left">VR task metrics &#x0026; ML classification</td>
<td valign="top" align="left">Structured VR curriculum</td>
<td valign="top" align="left">Laparoscopic simulation lab</td>
</tr>
<tr>
<td valign="top" align="left">Tolentino et al. (<xref ref-type="bibr" rid="B38">38</xref>)</td>
<td valign="top" align="left">Medical General</td>
<td valign="top" align="left">Not reported</td>
<td valign="top" align="left">Online survey</td>
<td valign="top" align="left">None</td>
<td valign="top" align="left">Residency program at McGill University</td>
</tr>
<tr>
<td valign="top" align="left">Latour et al. (<xref ref-type="bibr" rid="B39">39</xref>)</td>
<td valign="top" align="left">Surgery</td>
<td valign="top" align="left">Not reported</td>
<td valign="top" align="left">Technical skill scores and time</td>
<td valign="top" align="left">VASN simulation sessions</td>
<td valign="top" align="left">3D-printed FESS simulation lab</td>
</tr>
<tr>
<td valign="top" align="left">Real et al. (<xref ref-type="bibr" rid="B40">40</xref>)</td>
<td valign="top" align="left">Pediatrics</td>
<td valign="top" align="left">Four months</td>
<td valign="top" align="left">Psychologist-rated VR scenarios</td>
<td valign="top" align="left">Didactics&#x002B;VR practice</td>
<td valign="top" align="left">Pediatric residency training</td>
</tr>
<tr>
<td valign="top" align="left">Krive et al. (<xref ref-type="bibr" rid="B41">41</xref>)</td>
<td valign="top" align="left">Medical General</td>
<td valign="top" align="left">Four weeks</td>
<td valign="top" align="left">Quizzes and assignments</td>
<td valign="top" align="left">Interactive modules and assignments</td>
<td valign="top" align="left">University of Illinois College of Medicine</td>
</tr>
<tr>
<td valign="top" align="left">Mergen et al. (<xref ref-type="bibr" rid="B42">42</xref>)</td>
<td valign="top" align="left">Medical General</td>
<td valign="top" align="left">Not applicable</td>
<td valign="top" align="left">Not applicable</td>
<td valign="top" align="left">Not applicable</td>
<td valign="top" align="left">Not applicable</td>
</tr>
<tr>
<td valign="top" align="left">Tsopra et al. (<xref ref-type="bibr" rid="B43">43</xref>)</td>
<td valign="top" align="left">Medical General</td>
<td valign="top" align="left">Not reported</td>
<td valign="top" align="left">Student ratings and feedback</td>
<td valign="top" align="left">Lectures &#x0026; brainstorming sessions</td>
<td valign="top" align="left">University Paris Cit&#x00E9;</td>
</tr>
<tr>
<td valign="top" align="left">Andersen et al. (<xref ref-type="bibr" rid="B44">44</xref>)</td>
<td valign="top" align="left">Ophthalmology</td>
<td valign="top" align="left">Self-paced</td>
<td valign="top" align="left">Exercises &#x0026; certification tests</td>
<td valign="top" align="left">Instructional videos &#x0026; interactive tasks</td>
<td valign="top" align="left">Diabetic retinopathy screening training</td>
</tr>
<tr>
<td valign="top" align="left">Borakati et al. (<xref ref-type="bibr" rid="B45">45</xref>)</td>
<td valign="top" align="left">Medical General</td>
<td valign="top" align="left">Not reported</td>
<td valign="top" align="left">Sentiment analysis &#x0026; topic modelling</td>
<td valign="top" align="left">E-learning modules</td>
<td valign="top" align="left">International multi-country</td>
</tr>
<tr>
<td valign="top" align="left">Gabr et al. (<xref ref-type="bibr" rid="B46">46</xref>)</td>
<td valign="top" align="left">Radiology</td>
<td valign="top" align="left">April 2020</td>
<td valign="top" align="left">Case volume &#x0026; didactic hours analysis</td>
<td valign="top" align="left">Increased remote didactics</td>
<td valign="top" align="left">Radiology residency program</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="F6" position="float"><label>Figure&#x00A0;6</label>
<caption><p>Distribution of studies by area of medical education.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdgth-07-1740557-g006.tif"><alt-text content-type="machine-generated">Bar chart titled &#x201C;Area of medical education&#x201D; displaying the number of studies in different medical fields. Medical General leads with six studies, followed by Ophthalmology and Pediatrics with two each. Radiology, Surgery, and Veterinary Medicine have one study each.</alt-text>
</graphic>
</fig>
<p>Surgery was explored in two studies (<xref ref-type="bibr" rid="B37">37</xref>, <xref ref-type="bibr" rid="B39">39</xref>). In the first study, a VR-based structured laparoscopic curriculum was evaluated using machine learning classifiers to assess student performance. The incorporation of explainable AI (XAI) models into the feedback process allowed for objective, real-time performance monitoring. In the second study (<xref ref-type="bibr" rid="B39">39</xref>), augmented surgical navigation (VASN) was used to enhance training in functional endoscopic sinus surgery (FESS). The results detail that participants demonstrated significant improvements in their skill and confidence scores, supporting the effectiveness of AI-assisted simulation environments for surgical training.</p>
<p>Radiology was the focus of two additional studies. In the first (<xref ref-type="bibr" rid="B35">35</xref>), generative AI (ChatGPT-3.5 and 4.0) was used to simulate communication between radiology technicians and claustrophobic patients. The study highlighted the usefulness of AI in developing interpersonal skills through natural language interaction, an area often overlooked in traditional radiology training. In the same vein, researchers in (<xref ref-type="bibr" rid="B46">46</xref>) analyzed the impact of COVID-19 on radiology case volume and instructional design, observing a shift toward online remote learning platforms. These studies underscore how AI can facilitate both the technical and interpersonal aspects of radiology training.</p>
<p>Other fields included pediatrics (<xref ref-type="bibr" rid="B40">40</xref>), ophthalmology (<xref ref-type="bibr" rid="B44">44</xref>), and veterinary medicine (<xref ref-type="bibr" rid="B36">36</xref>). For example, in study (<xref ref-type="bibr" rid="B40">40</xref>), a randomized controlled trial was conducted that incorporated virtual reality simulations to improve behavioral health anticipatory guidance (BHAG) and motivational interviewing skills among pediatric residents. The results indicated statistically significant improvements in communication skills, illustrating the potential of immersive technologies in behavioral pediatrics. In ophthalmology, study (<xref ref-type="bibr" rid="B44">44</xref>) developed a self-learning digital platform for diabetic retinopathy detection. The program offered certification and interactive learning tasks, and more than 150 healthcare professionals completed the course, demonstrating the scalability of this type of model. Finally, research in (<xref ref-type="bibr" rid="B36">36</xref>) presented veterinary students with AI-generated standardized clients, providing valuable information on integrating AI into veterinary communication training. The students responded positively but identified a clear need to increase AI literacy within veterinary education.</p>
<p>The studies analyzed employed diverse training modalities, reflecting both institutional resources and pedagogical preferences. These modalities included self-paced online modules (<xref ref-type="bibr" rid="B44">44</xref>), structured simulation labs (<xref ref-type="bibr" rid="B39">39</xref>), short workshops (<xref ref-type="bibr" rid="B36">36</xref>), randomized controlled trials (<xref ref-type="bibr" rid="B40">40</xref>), and e-learning courses with natural language processing (NLP)-based assessments (<xref ref-type="bibr" rid="B45">45</xref>). For example, studies (<xref ref-type="bibr" rid="B34">34</xref>, <xref ref-type="bibr" rid="B38">38</xref>) were survey-based and did not involve direct educational interventions, but rather assessed perceptions and attitudes toward AI and XR in medical training settings.</p>
<p>The duration of the educational interventions varied significantly, from single-day or single-session labs (<xref ref-type="bibr" rid="B36">36</xref>) to modules lasting several weeks (<xref ref-type="bibr" rid="B41">41</xref>). In study (<xref ref-type="bibr" rid="B40">40</xref>), monthly instructional sessions were conducted over a four-month period, combining theory with repeated virtual reality practice. By contrast, studies (<xref ref-type="bibr" rid="B35">35</xref>, <xref ref-type="bibr" rid="B37">37</xref>, <xref ref-type="bibr" rid="B46">46</xref>) did not report the duration of the intervention, highlighting the need for standardized reporting practices in medical education research.</p>
<p>Assessment tools ranged from validated questionnaires and psychometric scales to advanced analytical frameworks, including machine learning. Study (<xref ref-type="bibr" rid="B35">35</xref>) evaluated simulation success rates and AI error detection, while study (<xref ref-type="bibr" rid="B37">37</xref>) used predictive models to assess skill progression in laparoscopic tasks. Study (<xref ref-type="bibr" rid="B41">41</xref>) relied on weekly questionnaires and task ratings to monitor student learning outcomes, and study (<xref ref-type="bibr" rid="B40">40</xref>) employed pediatric psychologists to code residents&#x0027; behavior in simulated encounters. Qualitative feedback was also common: study (<xref ref-type="bibr" rid="B43">43</xref>) incorporated student self-reports and project engagement metrics to measure curriculum effectiveness, while (<xref ref-type="bibr" rid="B45">45</xref>) applied sentiment analysis and theme modeling to extract qualitative feedback from large-scale courses.</p>
<p>Across the 13 studies, knowledge acquisition was typically measured through written tests or online quizzes; skill development and performance were captured through OSCE-style checklists, simulator-derived scores, error rates, and task-completion times; and engagement was assessed using Likert-type satisfaction or engagement scales, usage statistics from digital platforms, and qualitative feedback. This mapping between outcome domains and assessment instruments is summarized in <xref ref-type="table" rid="T5">Table&#x00A0;5</xref>.</p>
<p>Finally, some studies, such as (<xref ref-type="bibr" rid="B42">42</xref>), described digital education platforms under development, including &#x201C;medical tr.AI.ning,&#x201D; a modular tool designed to simulate first-person clinical decision-making using AI-powered virtual agents. These projects point to the future of medical education: scalable, customizable, and highly interactive environments powered by AI and XR.</p>
</sec>
</sec>
<sec id="s4" sec-type="discussion"><label>4</label><title>Discussion</title>
<p>Most of the included studies reported positive effects associated with the implementation of AI and XR, particularly in improving skills development, student engagement, and performance indicators. These technologies were frequently introduced through simulations, virtual scenarios, chat-based AI, or e-learning platforms, suggesting a wide range of implementation modalities adapted to institutional resources and educational objectives. General medical education emerged as the most frequently addressed area, possibly reflecting its broad relevance and lower technical barriers to implementation compared to highly specialized fields. The widespread adoption of these innovations is not without challenges. Several of the selected studies highlighted recurring barriers, such as limitations in technological infrastructure, concerns about data privacy, insufficient teacher training, and the absence of standardized curriculum frameworks.</p>
<sec id="s4a"><label>4.1</label><title>Implications for curriculum design in medical education</title>
<p>Several studies have demonstrated the positive impact of AI and XR technologies on student skills development and engagement. The authors of (<xref ref-type="bibr" rid="B35">35</xref>) used ChatGPT to simulate interactions between radiologists and patients, improving communication strategies during MRI scans, while (<xref ref-type="bibr" rid="B36">36</xref>) showed that veterinary students benefited from AI-generated clinical cases and standardized patients, which facilitated communication training through immersive and contextualized experiences.</p>
<p>Other interventions, such as the study in (<xref ref-type="bibr" rid="B39">39</xref>), integrated VASN augmented surgical navigation into endoscopic sinus surgery training, demonstrating improvements in technical performance and confidence in the procedure. The authors of (<xref ref-type="bibr" rid="B40">40</xref>) implemented virtual reality simulations to improve motivational interviewing and behavioral health counseling skills in pediatric residents, and those of (<xref ref-type="bibr" rid="B41">41</xref>) introduced a structured AI module focused on evidence-based medicine and clinical integration, which improved students&#x0027; conceptual and applied understanding. Their approach, based on reverse design and modular instruction, illustrates how AI literacy can be taught even to students with no prior technical knowledge. The authors of (<xref ref-type="bibr" rid="B43">43</xref>) went further by involving students in the design of AI-based clinical decision support systems, fostering critical thinking and digital leadership.</p>
<p>These findings point to several directions for advancing medical education with AI and XR. Interventions should be aligned with existing competency-based frameworks by mapping AI- and XR-enhanced activities to specific knowledge, skills, and professional behaviors, rather than treating them as isolated add-ons. In many of the most promising examples, AI or XR is embedded within longitudinal learning sequences, such as combining introductory modules on AI literacy with progressively more complex simulated cases and opportunities for supervised decision-making. Immersive simulations and AI-driven feedback can be directed toward competencies that are difficult to practice in routine clinical placements, including communication in high-stakes scenarios, management of rare events, and interprofessional collaboration. Several studies also highlight the value of involving students as co-designers of AI tools and learning activities, which may support digital professionalism, critical appraisal of algorithms, and more responsible adoption of these technologies in future clinical practice.</p>
</sec>
<sec id="s4b"><label>4.2</label><title>Methodological considerations and quality of evidence</title>
<p>The formal quality appraisal confirmed substantial methodological heterogeneity across the 13 included studies, both in research design and in the assessment tools employed. This diversity provides a broad overview of approaches to AI and XR in medical education but also limits comparability and constrains the strength of inferences that can be drawn from the available evidence.</p>
<p>A large proportion of studies relied on observational or descriptive designs, such as cross-sectional surveys (<xref ref-type="bibr" rid="B34">34</xref>, <xref ref-type="bibr" rid="B35">35</xref>, <xref ref-type="bibr" rid="B38">38</xref>) and uncontrolled educational evaluations (<xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B44">44</xref>). These designs are useful for exploring initial perceptions, feasibility, and acceptance of new technologies, yet their capacity to establish causal relationships or accurately quantify educational impact is limited. The absence of control groups in many of these investigations, together with a frequent reliance on subjective measures&#x2014;such as self-administered questionnaires or satisfaction surveys&#x2014;reduces the robustness of the findings.</p>
<p>A smaller subset of studies adopted more rigorous designs, including randomized controlled trials (<xref ref-type="bibr" rid="B40">40</xref>), prospective trials (<xref ref-type="bibr" rid="B39">39</xref>), and educational innovations with systematic performance evaluation (<xref ref-type="bibr" rid="B41">41</xref>). These approaches offer comparatively stronger evidence and allow more credible inferences about the effects of AI- and XR-based interventions on clinical learning. Nevertheless, even within this group, the quality of the evidence is often constrained by small sample sizes, single-centre settings, and restricted follow-up periods. Long-term assessments of knowledge retention, skills transfer to real clinical environments, and sustained behavioural change were rarely reported, and outcome measures were not always based on validated instruments.</p>
<p>Another methodological consideration is the diversity of technologies and implementations evaluated. Some studies focused on virtual reality simulations (<xref ref-type="bibr" rid="B39">39</xref>, <xref ref-type="bibr" rid="B40">40</xref>), others on conversational AI tools (<xref ref-type="bibr" rid="B35">35</xref>, <xref ref-type="bibr" rid="B36">36</xref>) or e-learning platforms (<xref ref-type="bibr" rid="B44">44</xref>, <xref ref-type="bibr" rid="B45">45</xref>). Across these contexts, limited faculty training, inadequate financial and technical resources, and logistical barriers were frequently highlighted. These constraints not only influence the success of implementation but also impact study design, sample recruitment, and the quality of data collection.</p>
<p>These features indicate that the current evidence base should be interpreted as preliminary and largely hypothesis-generating rather than definitive. While signals of benefit for AI and XR in medical and health-professions education are consistent across studies, the overall certainty of the evidence is low to moderate at best. Stronger, more standardized methodological approaches will be required to determine the true magnitude and generalizability of the educational effects attributed to these technologies.</p>
</sec>
<sec id="s4c"><label>4.3</label><title>Comparison with other studies</title>
<p><xref ref-type="table" rid="T6">Table&#x00A0;6</xref>, a comparison of this review with existing literature, reveals convergences and differences in thematic focus, methodological orientation, and conceptual contributions to understanding how digital technologies&#x2014;particularly AI and XR&#x2014;are transforming medical education. While several previous reviews have addressed the integration of emerging technologies in medical or health-related training, this review synthesizes empirical studies across diverse clinical settings, educational modalities, and geographical contexts, offering a more comprehensive and integrative perspective.</p>
<table-wrap id="T6" position="float"><label>Table&#x00A0;6</label>
<caption><p>Reviews on AI, XR and digital technologies in medical education.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Reference</th>
<th valign="top" align="center">Central theme/objective</th>
<th valign="top" align="center">Main variables or constructs</th>
<th valign="top" align="center">Methodological orientation</th>
<th valign="top" align="center">Comparative notes/comments</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Mese et al. (<xref ref-type="bibr" rid="B47">47</xref>)</td>
<td valign="top" align="left">Investigates how AI (especially ChatGPT) integrates with e-learning to support radiology education and proposes combining AI with expert-curated resources to improve accuracy and reliability.</td>
<td valign="top" align="left">ChatGPT, e-learning platforms, radiology curricula, student assessment, ethical concerns (bias, data accuracy).</td>
<td valign="top" align="left">Scoping review using multiple databases.</td>
<td valign="top" align="left">Focuses on radiology; emphasises integration of generative AI with traditional e-learning; raises concerns about accuracy and bias, echoing ethical considerations in the present review.</td>
</tr>
<tr>
<td valign="top" align="left">Singaram et al. (<xref ref-type="bibr" rid="B48">48</xref>)</td>
<td valign="top" align="left">Assesses digital feedback tools (web platforms, apps, virtual reality and artificial intelligence) used in clinical education and identifies facilitators and barriers to adoption.</td>
<td valign="top" align="left">Digital tools for feedback, convenience, personalized feedback, student perceptions, barriers (technical constraints, data security).</td>
<td valign="top" align="left">Scoping review following Joanna Briggs Institute guidelines.</td>
<td valign="top" align="left">Focuses on feedback processes; highlights VR and AI for real-time feedback; emphasises geographical research imbalance and privacy concerns; complements the present review by addressing the feedback dimension.</td>
</tr>
<tr>
<td valign="top" align="left">Lang et al. (<xref ref-type="bibr" rid="B17">17</xref>)</td>
<td valign="top" align="left">Explores how augmented, virtual and mixed reality (collectively extended reality, XR) provide immersive and collaborative learning in radiology, from anatomy teaching to image-guided interventions.</td>
<td valign="top" align="left">XR technologies (AR, VR, MR), collaborative virtual reading rooms, image analysis, procedural training, future AI integration.</td>
<td valign="top" align="left">Narrative/conceptual review.</td>
<td valign="top" align="left">Highlights XR benefits and challenges (technological, economic, ergonomic); anticipates AI-driven personalized learning; aligns closely with the AI/XR synergy emphasised in the present review.</td>
</tr>
<tr>
<td valign="top" align="left">Awuah et al. (<xref ref-type="bibr" rid="B49">49</xref>)</td>
<td valign="top" align="left">Reviews the integration of AI, machine learning and deep learning into neurosurgical training and simulation, highlighting improvements in patient outcomes and decision-making.</td>
<td valign="top" align="left">AI, ML, DL, neurosurgical simulation, diagnostic and prognostic outcomes, decision-making.</td>
<td valign="top" align="left">Narrative literature review.</td>
<td valign="top" align="left">Focuses on neurosurgical specialty; emphasises AI-assisted simulation across pre-, intra- and postoperative stages; aligns with the present review&#x0027;s emphasis on AI improving skills and patient care.</td>
</tr>
<tr>
<td valign="top" align="left">Zavala-Calahorrano et al. (<xref ref-type="bibr" rid="B50">50</xref>)</td>
<td valign="top" align="left">Systematically synthesizes literature on medical technology, AI and ChatGPT across diagnostics, treatment and education, identifying three categories: diagnostic/treatment innovations, medical education and public health/ethics.</td>
<td valign="top" align="left">VR/AR, AI applications, metaverse concepts (lifelogging, mirror-world), medtech, patient-centred care.</td>
<td valign="top" align="left">Systematic review using PRISMA and thematic analysis.</td>
<td valign="top" align="left">Broad scope; highlights VR/AR and metaverse in education; emphasises categories beyond education, providing context for the present review.</td>
</tr>
<tr>
<td valign="top" align="left">Savage et al. (<xref ref-type="bibr" rid="B51">51</xref>)</td>
<td valign="top" align="left">Evaluates the effectiveness of seven technologies&#x2014;including AI, immersive VR, desktop VR, needle guidance, robotics, AR and haptic devices&#x2014;in regional anaesthesia training.</td>
<td valign="top" align="left">Artificial intelligence, immersive and desktop VR, augmented reality, robotics, haptic feedback, learner confidence and performance.</td>
<td valign="top" align="left">Systematic/scoping review with data extraction and qualitative synthesis.</td>
<td valign="top" align="left">Focuses on a specific procedural domain; demonstrates positive impact of technology-enhanced training but emphasises need for combined traditional and technological approaches.</td>
</tr>
<tr>
<td valign="top" align="left">St Mart et al. (<xref ref-type="bibr" rid="B52">52</xref>)</td>
<td valign="top" align="left">Discusses current AI pathways and technological advances in orthopaedics and how AI could transform surgical education and patient care.</td>
<td valign="top" align="left">AI algorithms, data analytics, robotic surgery, educational implications, patient outcomes.</td>
<td valign="top" align="left">Narrative review.</td>
<td valign="top" align="left">Focuses on orthopaedics; emphasises future integration of AI into operating rooms; aligns with the present review by extending AI applications into another surgical domain.</td>
</tr>
<tr>
<td valign="top" align="left">Antoniou et al. (<xref ref-type="bibr" rid="B53">53</xref>)</td>
<td valign="top" align="left">Surveys training evolution in urolithiasis management, including high-/low-fidelity simulation, VR, AR and AI, and proposes curriculum pathways.</td>
<td valign="top" align="left">Simulation models, VR, AR, AI, standardized curricula, assessment and mentor-based training.</td>
<td valign="top" align="left">Narrative review.</td>
<td valign="top" align="left">Highlights balanced integration of simulation and mentor-guided training; emphasises stratified curricula; relates to the present review&#x0027;s focus on curriculum design.</td>
</tr>
<tr>
<td valign="top" align="left">Ramamurthy et al. (<xref ref-type="bibr" rid="B54">54</xref>)</td>
<td valign="top" align="left">Explores potential applications of the metaverse in healthcare, including AI, AR, VR, IoT, quantum computing and robotics, while addressing ethical and legal issues.</td>
<td valign="top" align="left">AI, AR, VR, IoT, metaverse platforms, patient interactions, ethical and legal considerations.</td>
<td valign="top" align="left">Conceptual review.</td>
<td valign="top" align="left">Broad healthcare focus; emphasises training and surgery among other applications; discusses ethics and data vulnerability, echoing concerns in the present review.</td>
</tr>
<tr>
<td valign="top" align="left">Jaju et al. (<xref ref-type="bibr" rid="B55">55</xref>)</td>
<td valign="top" align="left">Integrative review of how COVID-19 reshaped anesthesiology training, highlighting safety measures, virtual education, AI models in ICUs and the importance of mental health.</td>
<td valign="top" align="left">Remote learning, PPE and barriers, sub-specialty adaptations, AI models, mental health, virtual examinations.</td>
<td valign="top" align="left">Integrative literature review.</td>
<td valign="top" align="left">Focuses on pandemic-driven changes; includes AI applications in ICU and virtual education; relates indirectly to the present review by contextualizing digital transformation.</td>
</tr>
<tr>
<td valign="top" align="left">Soluk Tekke&#x015F;in et al. (<xref ref-type="bibr" rid="B56">56</xref>)</td>
<td valign="top" align="left">Reviews how digital technologies (e.g., radiology&#x0027;s digital leap, digital pathology) are transforming education, training and diagnostic workflows, especially in oral and maxillofacial pathology.</td>
<td valign="top" align="left">Digital imaging, telehealth, AI applications, workflow integration, educational adaptation.</td>
<td valign="top" align="left">Narrative review.</td>
<td valign="top" align="left">Focuses on dental and pathology domains; addresses digital disruption but less on AI/XR; offers context for broader technological transformation.</td>
</tr>
<tr>
<td valign="top" align="left">Hoogenboom et al. (<xref ref-type="bibr" rid="B57">57</xref>)</td>
<td valign="top" align="left">Reviews training programs for advanced endoscopic imaging (virtual chromoendoscopy, confocal laser endomicroscopy and volumetric laser endomicroscopy) and assesses learning curves and outcomes.</td>
<td valign="top" align="left">Advanced imaging techniques, didactic and web-based programs, learning curves, training outcomes, future AI assistance.</td>
<td valign="top" align="left">Narrative review of literature up to March 2020.</td>
<td valign="top" align="left">Focuses on GI endoscopy; emphasises web-based training efficacy; suggests need for standardized programs; mentions potential AI assistance.</td>
</tr>
<tr>
<td valign="top" align="left">Finocchiaro et al. (<xref ref-type="bibr" rid="B58">58</xref>)</td>
<td valign="top" align="left">Surveys technological developments in GI endoscopy simulators&#x2014;from mechanical systems and mechatronic devices to animal models&#x2014;and discusses emerging technologies such as AI, AR and robotics.</td>
<td valign="top" align="left">Simulation platforms (mechanical, mechatronic, animal-based), AI, AR, robotics, training needs.</td>
<td valign="top" align="left">Narrative review.</td>
<td valign="top" align="left">Examines simulation evolution; emphasises realism and pre-clinical practice; relates to the present review through simulation and emerging AI/AR technologies.</td>
</tr>
<tr>
<td valign="top" align="left">Kranjcevic et al. (<xref ref-type="bibr" rid="B59">59</xref>)</td>
<td valign="top" align="left">Narrates how the COVID-19 pandemic accelerated e-learning, teleconferencing and digital tools in academic medicine and outlines the transformation of educators&#x2019; roles.</td>
<td valign="top" align="left">E-learning, teleconferencing, digital publishing, educator roles, resilience, global outreach.</td>
<td valign="top" align="left">Narrative literature review.</td>
<td valign="top" align="left">Does not focus on AI/XR; provides contextual information on digital transformation during the pandemic.</td>
</tr>
<tr>
<td valign="top" align="left">Our study</td>
<td valign="top" align="left">Integration of Artificial Intelligence and Extended Reality in Medical Education</td>
<td valign="top" align="left">Digital technologies (AI, XR), Learning outcomes (skills, engagement, performance), Educational modalities</td>
<td valign="top" align="left">Systematic review following the PRISMA and PICO frameworks.</td>
<td valign="top" align="left">Synthesizes 13 empirical studies from 2019 to 2024, focusing on AI/XR in medical education. Identifies trends, barriers, and curricular implications.</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Some reviews have focused on specific tools or technologies, such as the use of ChatGPT in radiology training (<xref ref-type="bibr" rid="B47">47</xref>). While that review highlights the potential of generative AI to produce assessments and learning materials and points out risks such as bias and misinformation, it focuses exclusively on content generation rather than broader pedagogical outcomes. In contrast, the present study explores AI in relation to student engagement, clinical skill acquisition, and curriculum design, offering a systemic perspective that goes beyond a single educational tool.</p>
<p>Other studies have emphasized feedback mechanisms in clinical education (<xref ref-type="bibr" rid="B48">48</xref>), particularly the role of digital platforms, mobile applications, and virtual reality in facilitating personalized, real-time feedback. While our review acknowledges the value of interactive learning environments, it places greater emphasis on the curricular and infrastructure implications of adopting AI and XR, identifying challenges such as a lack of faculty training, limited technological infrastructure, and ethical considerations that may hinder implementation. Both reviews agree on the need for governance frameworks that ensure data privacy. Other reviews assess the effectiveness of the tools, often in specific specialties such as radiology (<xref ref-type="bibr" rid="B17">17</xref>), neurosurgery (<xref ref-type="bibr" rid="B49">49</xref>), or anesthesiology (<xref ref-type="bibr" rid="B51">51</xref>). These reviews typically highlight the pedagogical advantages of XR for procedural training, spatial awareness, and simulation-based learning. Our review incorporates these findings, but expands the analysis across various disciplines, demonstrating that XR not only supports technical skills, but also fosters engagement and interactive learning in diverse clinical contexts, such as pediatrics, ophthalmology, and general medicine.</p>
<p>Broader thematic syntheses have also emerged. One review categorizes technological innovations in diagnosis, treatment, and education (<xref ref-type="bibr" rid="B50">50</xref>), including the use of the metaverse and patient-centered design. While this typology is useful for understanding the broader application of digital tools in healthcare, it lacks the curriculum specificity and empirical grounding present in our review. Similarly, reviews addressing the metaverse and telemedicine (<xref ref-type="bibr" rid="B54">54</xref>), or transformations during the COVID-19 pandemic (<xref ref-type="bibr" rid="B55">55</xref>), provide contextual information but offer little depth regarding educational implementation or learning outcomes.</p>
<p>A recurring theme in other reviews is the need to balance technological advances with traditional training approaches. This is particularly evident in areas such as regional anesthesia training (<xref ref-type="bibr" rid="B51">51</xref>) and urology education (<xref ref-type="bibr" rid="B53">53</xref>), where the value of mentor-led instruction remains, even as digital tools enhance procedural training. Our findings fully align with this perspective, advocating for the integration&#x2014;not the replacement&#x2014;of established pedagogical practices with AI and XR technologies. This hybrid approach ensures that digital innovation complements human interaction, reflective practice, and clinical reasoning.</p>
<p>While several reviews emphasize technical effectiveness, such as improved diagnostic accuracy or faster skill acquisition, few systematically address the curricular implications of these technologies. This study rectifies this deficiency by identifying the need for institutional policies, faculty training, and infrastructure development to support the sustainable integration of technology. Reviews focused on specialized training in orthopedics (<xref ref-type="bibr" rid="B52">52</xref>), dentistry (<xref ref-type="bibr" rid="B56">56</xref>), or gastroenterology (<xref ref-type="bibr" rid="B57">57</xref>, <xref ref-type="bibr" rid="B58">58</xref>) provide valuable insights, but often limit their analysis to specific use cases within each discipline without extrapolating to broader curricular frameworks.</p>
<p>Our study differs methodologically from several previous works. While many are narrative or conceptual in nature, this review adopts a structured methodology using the PRISMA and PICO frameworks and focuses specifically on empirical educational interventions. By jointly examining AI-only, XR-only, and emerging AI&#x2013;XR hybrid applications, it maps how these tools are currently used to support knowledge acquisition, skills performance, and learner engagement across different clinical disciplines. This synthesis underpins the curriculum-oriented recommendations developed in Sections 4.1 and 4.2, particularly in relation to competency-based design, digital literacy, and the conditions required for sustainable implementation.</p>
</sec>
<sec id="s4d"><label>4.4</label><title>Limitations</title>
<p>This study provides an overview of the integration of AI and XR in medical education, but several limitations should be noted. First, the methodological diversity of the included studies introduces significant variability in the quality of the research and the measurement of outcomes. Many studies used cross-sectional surveys or descriptive case studies, which, while useful for exploring feasibility and user perceptions, offer limited capacity to infer causality or assess long-term educational impact. The scarcity of randomized controlled trials and longitudinal follow-ups reduces the strength of the evidence and limits the generalizability of the observed effects to other institutions and populations.</p>
<p>A further limitation is the variable and often small sample size; some pilot studies included fewer than ten participants. These small groups limit the statistical power of the results and restrict the conclusions that can be drawn about the scalability and reproducibility of AI and XR interventions. In addition, a substantial proportion of the included reports were explicitly described as pilot, feasibility, or development studies. These early-stage designs were deliberately retained because AI- and XR-based interventions in medical and health-professions education remain an emerging field, and many innovations are first reported in small-scale or developmental projects. However, the preliminary nature of these studies means that effect estimates should be interpreted with caution and cannot be assumed to translate directly to large-scale curricular implementation. Specifically, several studies did not provide sufficient demographic or contextual information, such as participants&#x0027; educational level or prior experience with digital tools&#x2014;variables relevant for evaluating learning outcomes.</p>
<p>A related limitation is the lack of standardized outcome measures. The tools used to assess skill acquisition, participation, or performance were heterogeneous, ranging from subjective self-assessments to simulation success rates and machine learning rankings. This inconsistency hinders comparisons between studies and compromises the possibility of conducting meta-analyses or establishing benchmark performance indicators. Additionally, many reports did not provide complete descriptive statistics or effect size estimates, which further limits the interpretability of the findings and hampers direct comparison across interventions.</p>
<p>Technological and institutional barriers also limited the scope of many interventions. Challenges such as insufficient digital infrastructure, limited teacher training, high implementation costs, and resistance to technological change were frequently reported. These factors not only affect the feasibility of large-scale AI/XR implementation but also influence study design, often limiting the depth or rigor of intervention evaluations. A further limitation is the heterogeneity with which the technological components themselves were reported. Some articles provide detailed descriptions of algorithms, platforms, and implementation workflows, whereas others focus mainly on curricular integration or learner outcomes and describe the underlying technology only in broad terms. This inconsistency reduces the granularity with which specific AI and XR tools can be compared across studies.</p>
<p>This review also primarily identified research reporting positive outcomes or improvements in learning, with fewer studies critically examining implementation failures or neutral effects. Current literature may therefore overestimate the effectiveness of these technologies in educational settings; while emerging data are promising, caution is advised when interpreting the results. More robust, comparative, and longitudinal research is needed to validate the educational value of AI and XR in diverse medical training contexts.</p>
</sec>
</sec>
<sec id="s5" sec-type="conclusions"><label>5</label><title>Conclusion</title>
<p>The current evidence base on artificial intelligence and extended reality in medical and health-professions education is still limited and methodologically heterogeneous. Most of the 13 included studies were small, single-center evaluations with short follow-up and predominantly observational or descriptive designs, using non-standardized outcome measures and rarely examining transfer to real clinical practice. These features substantially weaken the strength and generalizability of the available evidence.</p>
<p>Within this context, reported benefits for knowledge, skills, performance, and engagement should be regarded as promising but preliminary signals rather than definitive proof of superiority over high-quality conventional instruction. Only a minority of interventions to date have implemented fully integrated AI&#x2013;XR ecosystems; most studies evaluated AI-only or XR-only approaches. Claims about the added value of combining AI with XR must therefore remain cautious and are currently supported by a very small empirical base, further constrained by persistent implementation barriers such as costs, technical reliability, data protection, and insufficient faculty development.</p>
<p>Future research should prioritize rigorously designed studies with adequate sample sizes, validated and comparable outcomes, and longitudinal follow-up, explicitly distinguishing between AI-only, XR-only, and fully integrated AI&#x2013;XR interventions. In parallel, efforts to strengthen digital literacy, ethical and regulatory awareness, and faculty preparation within competency-based curricula will be essential to determine whether these technologies can deliver sustainable and equitable improvements in medical education.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="data-availability"><title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/Supplementary Material, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec id="s7" sec-type="author-contributions"><title>Author contributions</title>
<p>TT: Conceptualization, Data curation, Formal analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. DV: Conceptualization, Formal analysis, Investigation, Methodology, Resources, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. MG: Conceptualization, Data curation, Formal analysis, Resources, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. BR: Conceptualization, Formal analysis, Methodology, Resources, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. RT-F: Conceptualization, Formal analysis, Investigation, Methodology, Validation, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<ack><title>Acknowledgments</title>
<p>The authors would like to thank Corporaci&#x00F3;n Ecuatoriana para el Desarrollo de la Investigaci&#x00F3;n y Academia&#x2014;CEDIA for the support given to the present research, development, and innovation work through its CEPRA program 2022, especially for the &#x201C;Proyecto 18&#x2013;Tecnologias Inmersivas&#x201D; fund.</p>
</ack>
<sec id="s9" sec-type="COI-statement"><title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s10" sec-type="ai-statement"><title>Generative AI statement</title>
<p>The author(s) declared that generative AI was used in the creation of this manuscript. Grammarly AI Pro has been used only for English improvement.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec id="s11" sec-type="disclaimer"><title>Publisher&#x0027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list><title>References</title>
<ref id="B1"><label>1.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>V</given-names></name> <name><surname>Kharb</surname> <given-names>P</given-names></name></person-group>. <article-title>A paradigm shift from teaching to learning gross anatomy: meta-analysis of implications for instructional methods</article-title>. <source>J Anat Soc India</source>. (<year>2013</year>) <volume>62</volume>:<fpage>84</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1016/S0003-2778(13)80019-6</pub-id></mixed-citation></ref>
<ref id="B2"><label>2.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lewis</surname> <given-names>KO</given-names></name> <name><surname>Popov</surname> <given-names>V</given-names></name> <name><surname>Fatima</surname> <given-names>SS</given-names></name></person-group>. <article-title>From static web to metaverse: reinventing medical education in the post pandemic era</article-title>. <source>Ann Med</source>. (<year>2024</year>) <volume>56</volume>(<issue>1</issue>):<fpage>2305694</fpage>. <pub-id pub-id-type="doi">10.1080/07853890.2024.2305694</pub-id><pub-id pub-id-type="pmid">38261592</pub-id></mixed-citation></ref>
<ref id="B3"><label>3.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Zaidi</surname> <given-names>S</given-names></name> <name><surname>Nasir</surname> <given-names>M</given-names></name></person-group>. <article-title>Clinical teaching</article-title>. In: <source>Teaching and Learning Methods in Medicine</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name> (<year>2015</year>). <pub-id pub-id-type="doi">10.1007/978-3-319-06850-3_7</pub-id></mixed-citation></ref>
<ref id="B4"><label>4.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Boshuizen</surname> <given-names>HPA</given-names></name> <name><surname>Schmidt</surname> <given-names>HG</given-names></name></person-group>. <article-title>On the role of biomedical knowledge in clinical reasoning by experts, intermediates and novices</article-title>. <source>Cogn Sci</source>. (<year>1992</year>) <volume>16</volume>:<fpage>153</fpage>&#x2013;<lpage>84</lpage>. <pub-id pub-id-type="doi">10.1207/s15516709cog1602_1</pub-id></mixed-citation></ref>
<ref id="B5"><label>5.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bhardwaj</surname> <given-names>V</given-names></name> <name><surname>Zhang</surname> <given-names>S</given-names></name> <name><surname>Tan</surname> <given-names>YQ</given-names></name> <name><surname>Pandey</surname> <given-names>V</given-names></name></person-group>. <article-title>Redefining learning: student-centered strategies for academic and personal growth</article-title>. <source>Front Educ</source>. (<year>2025</year>) <volume>10</volume>:<fpage>1518602</fpage>. <pub-id pub-id-type="doi">10.3389/feduc.2025.1518602</pub-id></mixed-citation></ref>
<ref id="B6"><label>6.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Akhtar</surname> <given-names>ZB</given-names></name> <name><surname>Rawol</surname> <given-names>AT</given-names></name></person-group>. <article-title>Artificial intelligence (AI) and extended reality (XR): a biomedical engineering perspective investigation analysis</article-title>. <source>Int J Electr Eng Manag Innov</source>. (<year>2024</year>) <volume>6</volume>(<issue>3</issue>):<fpage>5</fpage>. <pub-id pub-id-type="doi">10.35882/ijeeemi.v6i3.5</pub-id></mixed-citation></ref>
<ref id="B7"><label>7.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gligorea</surname> <given-names>I</given-names></name> <name><surname>Cioca</surname> <given-names>M</given-names></name> <name><surname>Oancea</surname> <given-names>R</given-names></name> <name><surname>Gorski</surname> <given-names>A-T</given-names></name> <name><surname>Gorski</surname> <given-names>H</given-names></name> <name><surname>Tudorache</surname> <given-names>P</given-names></name></person-group>. <article-title>Adaptive learning using artificial intelligence in e-learning: a literature review</article-title>. <source>Educ Sci</source>. (<year>2023</year>) <volume>13</volume>:<fpage>1216</fpage>. <pub-id pub-id-type="doi">10.3390/educsci13121216</pub-id></mixed-citation></ref>
<ref id="B8"><label>8.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lalit</surname></name> <name><surname>Kumar</surname><given-names>Y</given-names></name> <name><surname>Anu</surname></name> <name><surname>Kumar</surname><given-names>S</given-names></name> <name><surname>Khurana</surname> <given-names>D</given-names></name> <name><surname>Mrinal</surname></name></person-group>. <article-title>A study on the application of machine learning in adaptive intelligent tutoring systems</article-title>. <source>Int J Environ Sci</source>. (<year>2025</year>) (<issue>13s</issue>):<fpage>11</fpage>. <pub-id pub-id-type="doi">10.64252/akj6b797</pub-id></mixed-citation></ref>
<ref id="B9"><label>9.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Strielkowski</surname> <given-names>W</given-names></name> <name><surname>Grebennikova</surname> <given-names>V</given-names></name> <name><surname>Lisovskiy</surname> <given-names>A</given-names></name> <name><surname>Rakhimova</surname> <given-names>G</given-names></name> <name><surname>Vasileva</surname> <given-names>T</given-names></name></person-group>. <article-title>AI-driven adaptive learning for sustainable educational transformation</article-title>. <source>Sustain Dev</source>. (<year>2025</year>) <volume>33</volume>(<issue>2</issue>):<fpage>1921</fpage>&#x2013;<lpage>47</lpage>. <pub-id pub-id-type="doi">10.1002/sd.3221</pub-id></mixed-citation></ref>
<ref id="B10"><label>10.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sapci</surname> <given-names>AH</given-names></name> <name><surname>Sapci</surname> <given-names>HA</given-names></name></person-group>. <article-title>Artificial intelligence education and tools for medical and health informatics students: systematic review</article-title>. <source>JMIR Med Educ</source>. (<year>2020</year>) <volume>6</volume>(<issue>1</issue>):<fpage>e19285</fpage>. <pub-id pub-id-type="doi">10.2196/19285</pub-id><pub-id pub-id-type="pmid">32602844</pub-id></mixed-citation></ref>
<ref id="B11"><label>11.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kostadimas</surname> <given-names>D</given-names></name> <name><surname>Kasapakis</surname> <given-names>V</given-names></name> <name><surname>Kotis</surname> <given-names>K</given-names></name></person-group>. <article-title>A systematic review on the combination of VR, IoT and AI technologies, and their integration in applications</article-title>. <source>Future Internet</source>. (<year>2025</year>) <volume>17</volume>:<fpage>163</fpage>. <pub-id pub-id-type="doi">10.3390/fi17040163</pub-id></mixed-citation></ref>
<ref id="B12"><label>12.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Constantinescu</surname> <given-names>G-G</given-names></name> <name><surname>Silion</surname> <given-names>D</given-names></name> <name><surname>Iftene</surname> <given-names>A</given-names></name></person-group>. <article-title>Using artificial intelligence and mixed realities to create educational applications of the future</article-title>. <source>Procedia Comput Sci</source>. (<year>2024</year>) <volume>246</volume>:<fpage>3313</fpage>&#x2013;<lpage>32</lpage>. <pub-id pub-id-type="doi">10.1016/j.procs.2024.09.308</pub-id></mixed-citation></ref>
<ref id="B13"><label>13.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hmoud</surname> <given-names>M</given-names></name> <name><surname>Daher</surname> <given-names>W</given-names></name> <name><surname>Ayyoub</surname> <given-names>A</given-names></name></person-group>. <article-title>The impact of AI, XR, and combined AI-XR on student satisfaction: a moderated mediation analysis of engagement and learner characteristics</article-title>. <source>IEEE Access</source>. (<year>2025</year>) <volume>13</volume>:<fpage>140614</fpage>&#x2013;<lpage>28</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2025.3597239</pub-id></mixed-citation></ref>
<ref id="B14"><label>14.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cao</surname> <given-names>C</given-names></name> <name><surname>Cerfolio</surname> <given-names>RJ</given-names></name></person-group>. <article-title>Virtual or augmented reality to enhance surgical education and surgical planning</article-title>. <source>Thorac Surg Clin</source>. (<year>2019</year>) <volume>29</volume>:<fpage>329</fpage>&#x2013;<lpage>37</lpage>. <pub-id pub-id-type="doi">10.1016/j.thorsurg.2019.03.010</pub-id><pub-id pub-id-type="pmid">31235302</pub-id></mixed-citation></ref>
<ref id="B15"><label>15.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Khor</surname> <given-names>WS</given-names></name> <name><surname>Baker</surname> <given-names>B</given-names></name> <name><surname>Amin</surname> <given-names>K</given-names></name> <name><surname>Chan</surname> <given-names>A</given-names></name> <name><surname>Patel</surname> <given-names>K</given-names></name> <name><surname>Wong</surname> <given-names>J</given-names></name></person-group>. <article-title>Augmented and virtual reality in surgery&#x2014;the digital surgical environment: applications, limitations and legal pitfalls</article-title>. <source>Ann Transl Med</source>. (<year>2016</year>) <volume>4</volume>(<issue>23</issue>):<fpage>454</fpage>. <pub-id pub-id-type="doi">10.21037/atm.2016.12.23</pub-id><pub-id pub-id-type="pmid">28090510</pub-id></mixed-citation></ref>
<ref id="B16"><label>16.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lastrucci</surname> <given-names>A</given-names></name> <name><surname>Wandael</surname> <given-names>Y</given-names></name> <name><surname>Barra</surname> <given-names>A</given-names></name> <name><surname>Ricci</surname> <given-names>R</given-names></name> <name><surname>Maccioni</surname> <given-names>G</given-names></name> <name><surname>Pirrera</surname> <given-names>A</given-names></name><etal/></person-group> <article-title>Exploring augmented reality integration in diagnostic imaging: myth or reality?</article-title> <source>Diagnostics</source>. (<year>2024</year>) <volume>14</volume>:<fpage>1333</fpage>. <pub-id pub-id-type="doi">10.3390/diagnostics14131333</pub-id><pub-id pub-id-type="pmid">39001224</pub-id></mixed-citation></ref>
<ref id="B17"><label>17.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lang</surname> <given-names>M</given-names></name> <name><surname>Ghandour</surname> <given-names>S</given-names></name> <name><surname>Rikard</surname> <given-names>B</given-names></name> <name><surname>Balasalle</surname> <given-names>EK</given-names></name> <name><surname>Rouhezamin</surname> <given-names>MR</given-names></name> <name><surname>Zhang</surname> <given-names>H</given-names></name><etal/></person-group> <article-title>Medical extended reality for radiology education and training</article-title>. <source>J Am Coll Radiol</source>. (<year>2024</year>) <volume>21</volume>(<issue>10</issue>):<fpage>1583</fpage>&#x2013;<lpage>94</lpage>. <pub-id pub-id-type="doi">10.1016/j.jacr.2024.05.006</pub-id><pub-id pub-id-type="pmid">38866067</pub-id></mixed-citation></ref>
<ref id="B18"><label>18.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Akhtar</surname> <given-names>ZB</given-names></name> <name><surname>Rawol</surname> <given-names>AT</given-names></name></person-group>. <article-title>Artificial intelligence (AI) &#x0026; extended reality (XR) within healthcare: a multidimensional biomedical engineering (BME)</article-title>. <source>Investigative Exploration Biomed Clin Res</source>. (<year>2025</year>) <volume>1</volume>(<issue>1</issue>):<fpage>1</fpage>&#x2013;<lpage>22</lpage>. <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="http://02.2025/BCRJ/003">http://02.2025/BCRJ/003</ext-link> (Accessed July 07, 2025).</comment></mixed-citation></ref>
<ref id="B19"><label>19.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>Y</given-names></name> <name><surname>Gaertner</surname> <given-names>R</given-names></name></person-group>. <article-title>Technology transfer from university to industry: a large-scale experiment with technology development and commercialization</article-title>. <source>Policy Stud J</source>. (<year>1994</year>) <volume>22</volume>(<issue>2</issue>):<fpage>384</fpage>&#x2013;<lpage>99</lpage>. <pub-id pub-id-type="doi">10.1111/j.1541-0072.1994.tb01476.x</pub-id></mixed-citation></ref>
<ref id="B20"><label>20.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Siripurapu</surname> <given-names>S</given-names></name> <name><surname>Darimireddy</surname> <given-names>NK</given-names></name> <name><surname>Chehri</surname> <given-names>A</given-names></name> <name><surname>Sridhar</surname> <given-names>B</given-names></name> <name><surname>Paramkusam</surname> <given-names>AV</given-names></name></person-group>. <article-title>Technological advancements and elucidation gadgets for healthcare applications: an exhaustive methodological review-part-I (AI, big data, block chain, open-source technologies, and cloud computing)</article-title>. <source>Electronics (Basel)</source>. (<year>2023</year>) <volume>12</volume>:<fpage>750</fpage>. <pub-id pub-id-type="doi">10.3390/electronics12030750</pub-id></mixed-citation></ref>
<ref id="B21"><label>21.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Murala</surname> <given-names>DK</given-names></name> <name><surname>Panda</surname> <given-names>SK</given-names></name> <name><surname>Dash</surname> <given-names>SP</given-names></name></person-group>. <article-title>Medmetaverse: medical care of chronic disease patients and managing data using artificial intelligence, blockchain, and wearable devices state-of-the-art methodology</article-title>. <source>IEEE Access</source>. (<year>2023</year>) <volume>11</volume>:<fpage>138954</fpage>&#x2013;<lpage>85</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2023.3340791</pub-id></mixed-citation></ref>
<ref id="B22"><label>22.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Khamis</surname> <given-names>T</given-names></name> <name><surname>Naseem</surname> <given-names>A</given-names></name> <name><surname>Khamis</surname> <given-names>A</given-names></name> <name><surname>Petrucka</surname> <given-names>P</given-names></name></person-group>. <article-title>The COVID-19 pandemic: a catalyst for creativity and collaboration for online learning and work-based higher education systems and processes</article-title>. <source>J Work-Appl Manag</source>. (<year>2021</year>) <volume>13</volume>(<issue>2</issue>):<fpage>184</fpage>&#x2013;<lpage>96</lpage>. <pub-id pub-id-type="doi">10.1108/JWAM-01-2021-0010</pub-id></mixed-citation></ref>
<ref id="B23"><label>23.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>dela Cruz</surname> <given-names>AM</given-names></name> <name><surname>Alick</surname> <given-names>S</given-names></name> <name><surname>Das</surname> <given-names>R</given-names></name> <name><surname>Brenner</surname> <given-names>A</given-names></name></person-group>. <article-title>Same material, different formats: comparing in-person and distance learning in undergraduate medical education</article-title>. <source>Acad Psychiatry</source>. (<year>2020</year>) <volume>44</volume>(<issue>6</issue>):<fpage>659</fpage>&#x2013;<lpage>63</lpage>. <pub-id pub-id-type="doi">10.1007/s40596-020-01333-7</pub-id><pub-id pub-id-type="pmid">33058051</pub-id></mixed-citation></ref>
<ref id="B24"><label>24.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kinshuk</surname></name> <name><surname>Chen</surname> <given-names>N-S</given-names></name> <name><surname>Cheng</surname> <given-names>I-L</given-names></name> <name><surname>Chew</surname> <given-names>SW</given-names></name></person-group>. <article-title>Evolution is not enough: revolutionizing current learning environments to smart learning environments</article-title>. <source>Int J Artif Intell Educ</source>. (<year>2016</year>) <volume>26</volume>(<issue>2</issue>):<fpage>561</fpage>&#x2013;<lpage>81</lpage>. <pub-id pub-id-type="doi">10.1007/s40593-016-0108-x</pub-id></mixed-citation></ref>
<ref id="B25"><label>25.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fahim</surname> <given-names>YA</given-names></name> <name><surname>Hasani</surname> <given-names>IW</given-names></name> <name><surname>Kabba</surname> <given-names>S</given-names></name> <name><surname>Ragab</surname> <given-names>WM</given-names></name></person-group>. <article-title>Artificial intelligence in healthcare and medicine: clinical applications, therapeutic advances, and future perspectives</article-title>. <source>Eur J Med Res</source>. (<year>2025</year>) <volume>30</volume>(<issue>1</issue>):<fpage>848</fpage>. <pub-id pub-id-type="doi">10.1186/s40001-025-03196-w</pub-id><pub-id pub-id-type="pmid">40988064</pub-id></mixed-citation></ref>
<ref id="B26"><label>26.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Halman</surname> <given-names>J</given-names></name> <name><surname>Tencer</surname> <given-names>S</given-names></name> <name><surname>Siemi&#x0144;ski</surname> <given-names>M</given-names></name></person-group>. <article-title>Artificial intelligence and extended reality in the training of vascular surgeons: a narrative review</article-title>. <source>Med Sci</source>. (<year>2025</year>) <volume>13</volume>:<fpage>126</fpage>. <pub-id pub-id-type="doi">10.3390/medsci13030126</pub-id></mixed-citation></ref>
<ref id="B27"><label>27.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Arr&#x00E1;ez-Aybar</surname> <given-names>LA</given-names></name></person-group>. <article-title>Evolving anatomy education: bridging dissection, traditional methods, and technological innovation for clinical excellence</article-title>. <source>Anatomia</source>. (<year>2025</year>) <volume>4</volume>:<fpage>9</fpage>. <pub-id pub-id-type="doi">10.3390/anatomia4020009</pub-id></mixed-citation></ref>
<ref id="B28"><label>28.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rahman</surname> <given-names>MH</given-names></name> <name><surname>Ahmad</surname> <given-names>AB</given-names></name> <name><surname>Sawal</surname> <given-names>MZHBM</given-names></name></person-group>. <article-title>The influence of personal factors on resistance to technology adoption in university libraries in Bangladesh</article-title>. <source>Inf Dev</source>. (<year>2024</year>). <comment>Online First</comment>. <pub-id pub-id-type="doi">10.1177/02666669241257196</pub-id></mixed-citation></ref>
<ref id="B29"><label>29.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>AG</given-names></name></person-group>. <article-title>AI- and XR-powered digital therapeutics (DTx) innovations</article-title>. In: <person-group person-group-type="editor"><name><surname>Chen</surname> <given-names>Y</given-names></name> <name><surname>Blasch</surname> <given-names>E</given-names></name></person-group>, editors. <source>Digital Frontiers &#x2013; Healthcare, Education, and Society in the Metaverse Era</source>. <publisher-loc>London, UK</publisher-loc>: <publisher-name>IntechOpen</publisher-name> (<year>2024</year>):<fpage>180</fpage>. <pub-id pub-id-type="doi">10.5772/intechopen.1006619</pub-id></mixed-citation></ref>
<ref id="B30"><label>30.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Masoumian Hosseini</surname> <given-names>ST</given-names></name> <name><surname>Qayumi</surname> <given-names>K</given-names></name> <name><surname>Pourabbasi</surname> <given-names>A</given-names></name> <name><surname>Haghighi</surname> <given-names>E</given-names></name> <name><surname>Sabet</surname> <given-names>B</given-names></name> <name><surname>Koohpaei</surname> <given-names>A</given-names></name><etal/></person-group> <article-title>Are we ready to integrate modern technologies into the medical curriculum for students? A systematic review</article-title>. <source>Discov Educ</source>. (<year>2025</year>) <volume>4</volume>(<issue>1</issue>):<fpage>114</fpage>. <pub-id pub-id-type="doi">10.1007/s44217-025-00521-7</pub-id></mixed-citation></ref>
<ref id="B31"><label>31.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Parums</surname> <given-names>DV</given-names></name></person-group>. <article-title>Editorial: review articles, systematic reviews, meta-analysis, and the updated preferred reporting items for systematic reviews and meta-analyses (PRISMA) 2020 guidelines</article-title>. <source>Med Sci Monit</source>. (<year>2021</year>) <volume>27</volume>:<fpage>e934475</fpage>. <pub-id pub-id-type="doi">10.12659/MSM.934475</pub-id><pub-id pub-id-type="pmid">34421116</pub-id></mixed-citation></ref>
<ref id="B32"><label>32.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Page</surname> <given-names>MJ</given-names></name> <name><surname>McKenzie</surname> <given-names>JE</given-names></name> <name><surname>Bossuyt</surname> <given-names>PM</given-names></name> <name><surname>Boutron</surname> <given-names>I</given-names></name> <name><surname>Hoffmann</surname> <given-names>TC</given-names></name> <name><surname>Mulrow</surname> <given-names>CD</given-names></name><etal/></person-group> <article-title>Updating guidance for reporting systematic reviews: development of the PRISMA 2020 statement</article-title>. <source>J Clin Epidemiol</source>. (<year>2021</year>) <volume>134</volume>:<fpage>103</fpage>&#x2013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1016/j.jclinepi.2021.02.003</pub-id><pub-id pub-id-type="pmid">33577987</pub-id></mixed-citation></ref>
<ref id="B33"><label>33.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Booth</surname> <given-names>A</given-names></name></person-group>. <article-title>Searching for qualitative research for inclusion in systematic reviews: a structured methodological review</article-title>. <source>Syst Rev</source>. (<year>2016</year>) <volume>5</volume>(<issue>1</issue>):<fpage>74</fpage>. <pub-id pub-id-type="doi">10.1186/s13643-016-0249-x</pub-id><pub-id pub-id-type="pmid">27145932</pub-id></mixed-citation></ref>
<ref id="B34"><label>34.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Khan</surname> <given-names>Z</given-names></name> <name><surname>Adil</surname> <given-names>T</given-names></name> <name><surname>Oduoye</surname> <given-names>MO</given-names></name> <name><surname>Khan</surname> <given-names>BS</given-names></name> <name><surname>Ayyazuddin</surname> <given-names>M</given-names></name></person-group>. <article-title>Assessing the knowledge, attitude and perception of extended reality (XR) technology in Pakistan&#x2019;s healthcare community in an era of artificial intelligence</article-title>. <source>Front Med</source>. (<year>2024</year>) <volume>11</volume>:<fpage>1456017</fpage>. <pub-id pub-id-type="doi">10.3389/fmed.2024.1456017</pub-id></mixed-citation></ref>
<ref id="B35"><label>35.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bonfitto</surname> <given-names>GR</given-names></name> <name><surname>Roletto</surname> <given-names>A</given-names></name> <name><surname>Savardi</surname> <given-names>M</given-names></name> <name><surname>Fasulo</surname> <given-names>SV</given-names></name> <name><surname>Catania</surname> <given-names>D</given-names></name> <name><surname>Signoroni</surname> <given-names>A</given-names></name></person-group>. <article-title>Harnessing ChatGPT dialogues to address claustrophobia in MRI&#x2014;a radiographers&#x2019; education perspective</article-title>. <source>Radiography</source>. (<year>2024</year>) <volume>30</volume>(<issue>3</issue>):<fpage>737</fpage>&#x2013;<lpage>44</lpage>. <pub-id pub-id-type="doi">10.1016/j.radi.2024.02.015</pub-id><pub-id pub-id-type="pmid">38428198</pub-id></mixed-citation></ref>
<ref id="B36"><label>36.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Artemiou</surname> <given-names>E</given-names></name> <name><surname>Hooper</surname> <given-names>S</given-names></name> <name><surname>Dascanio</surname> <given-names>L</given-names></name> <name><surname>Schmidt</surname> <given-names>M</given-names></name> <name><surname>Gilbert</surname> <given-names>G</given-names></name></person-group>. <article-title>Introducing AI-generated cases (AI-cases) &#x0026; standardized clients (AI-SCs) in communication training for veterinary students: perceptions and adoption challenges</article-title>. <source>Front Vet Sci</source>. (<year>2024</year>) <volume>11</volume>:<fpage>1504598</fpage>. <pub-id pub-id-type="doi">10.3389/fvets.2024.1504598</pub-id><pub-id pub-id-type="pmid">40066104</pub-id></mixed-citation></ref>
<ref id="B37"><label>37.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Prevezanou</surname> <given-names>K</given-names></name> <name><surname>Seimenis</surname> <given-names>I</given-names></name> <name><surname>Karaiskos</surname> <given-names>P</given-names></name> <name><surname>Pikoulis</surname> <given-names>E</given-names></name> <name><surname>Lykoudis</surname> <given-names>PM</given-names></name> <name><surname>Loukas</surname> <given-names>C</given-names></name></person-group>. <article-title>Machine learning approaches for evaluating the progress of surgical training on a virtual reality simulator</article-title>. <source>Appl Sci</source>. (<year>2024</year>) <volume>14</volume>:<fpage>9677</fpage>. <pub-id pub-id-type="doi">10.3390/app14219677</pub-id></mixed-citation></ref>
<ref id="B38"><label>38.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tolentino</surname> <given-names>R</given-names></name> <name><surname>Rodriguez</surname> <given-names>C</given-names></name> <name><surname>Hersson-Edery</surname> <given-names>F</given-names></name> <name><surname>Lane</surname> <given-names>J</given-names></name> <name><surname>Abbasgholizadeh Rahimi</surname> <given-names>S</given-names></name></person-group>. <article-title>Perspectives on virtual interviews and emerging technologies integration in family medicine residency programs: a cross-sectional survey study</article-title>. <source>BMC Med Educ</source>. (<year>2024</year>) <volume>24</volume>(<issue>1</issue>):<fpage>975</fpage>. <pub-id pub-id-type="doi">10.1186/s12909-024-05874-5</pub-id><pub-id pub-id-type="pmid">39245713</pub-id></mixed-citation></ref>
<ref id="B39"><label>39.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Latour</surname> <given-names>M</given-names></name> <name><surname>Alvarez</surname> <given-names>I</given-names></name> <name><surname>Knackstedt</surname> <given-names>M</given-names></name> <name><surname>Yim</surname> <given-names>M</given-names></name></person-group>. <article-title>Virtually augmented surgical navigation in endoscopic sinus surgery simulation training: a prospective trial of repeated measures</article-title>. <source>Otolaryngol&#x2013;Head Neck Surg</source>. (<year>2024</year>) <volume>171</volume>(<issue>6</issue>):<fpage>1897</fpage>&#x2013;<lpage>903</lpage>. <pub-id pub-id-type="doi">10.1002/ohn.945</pub-id></mixed-citation></ref>
<ref id="B40"><label>40.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Real</surname> <given-names>FJ</given-names></name> <name><surname>Whitehead</surname> <given-names>M</given-names></name> <name><surname>Ollberding</surname> <given-names>NJ</given-names></name> <name><surname>Rosen</surname> <given-names>BL</given-names></name> <name><surname>Meisman</surname> <given-names>A</given-names></name> <name><surname>Crosby</surname> <given-names>LE</given-names></name><etal/></person-group> <article-title>A virtual reality curriculum to enhance residents&#x2019; behavioral health anticipatory guidance skills: a pilot trial</article-title>. <source>Acad Pediatr</source>. (<year>2023</year>) <volume>23</volume>(<issue>1</issue>):<fpage>185</fpage>&#x2013;<lpage>92</lpage>. <pub-id pub-id-type="doi">10.1016/j.acap.2022.07.011</pub-id><pub-id pub-id-type="pmid">35870801</pub-id></mixed-citation></ref>
<ref id="B41"><label>41.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Krive</surname> <given-names>J</given-names></name> <name><surname>Isola</surname> <given-names>M</given-names></name> <name><surname>Chang</surname> <given-names>L</given-names></name> <name><surname>Patel</surname> <given-names>T</given-names></name> <name><surname>Anderson</surname> <given-names>M</given-names></name> <name><surname>Sreedhar</surname> <given-names>R</given-names></name></person-group>. <article-title>Grounded in reality: artificial intelligence in medical education</article-title>. <source>JAMIA Open</source>. (<year>2023</year>) <volume>6</volume>(<issue>2</issue>):<fpage>ooad037</fpage>. <pub-id pub-id-type="doi">10.1093/jamiaopen/ooad037</pub-id></mixed-citation></ref>
<ref id="B42"><label>42.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mergen</surname> <given-names>M</given-names></name> <name><surname>Junga</surname> <given-names>A</given-names></name> <name><surname>Risse</surname> <given-names>B</given-names></name> <name><surname>Valkov</surname> <given-names>D</given-names></name> <name><surname>Graf</surname> <given-names>N</given-names></name> <name><surname>Marschall</surname> <given-names>B</given-names></name><etal/></person-group> <article-title>Immersive training of clinical decision making with AI driven virtual patients &#x2013; A new VR platform called medical tr.AI.ning</article-title>. <source>GMS J Med Educ</source>. (<year>2023</year>) <volume>40</volume>(<issue>2</issue>):<fpage>Doc18</fpage>. <pub-id pub-id-type="doi">10.3205/zma001600</pub-id><pub-id pub-id-type="pmid">37361242</pub-id></mixed-citation></ref>
<ref id="B43"><label>43.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tsopra</surname> <given-names>R</given-names></name> <name><surname>Peiffer-Smadja</surname> <given-names>N</given-names></name> <name><surname>Charlier</surname> <given-names>C</given-names></name> <name><surname>Campeotto</surname> <given-names>F</given-names></name> <name><surname>Lemogne</surname> <given-names>C</given-names></name> <name><surname>Ruszniewski</surname> <given-names>P</given-names></name><etal/></person-group> <article-title>Putting undergraduate medical students in AI-CDSS designers&#x2019; shoes: an innovative teaching method to develop digital health critical thinking</article-title>. <source>Int J Med Inform</source>. (<year>2023</year>) <volume>171</volume>:<fpage>104980</fpage>. <pub-id pub-id-type="doi">10.1016/j.ijmedinf.2022.104980</pub-id><pub-id pub-id-type="pmid">36681042</pub-id></mixed-citation></ref>
<ref id="B44"><label>44.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Andersen</surname> <given-names>JKH</given-names></name> <name><surname>Hubel</surname> <given-names>MS</given-names></name> <name><surname>Savarimuthu</surname> <given-names>TR</given-names></name> <name><surname>Rasmussen</surname> <given-names>ML</given-names></name> <name><surname>S&#x00F8;rensen</surname> <given-names>SLB</given-names></name> <name><surname>Grauslund</surname> <given-names>J</given-names></name></person-group>. <article-title>A digital online platform for education and certification of diabetic retinopathy health care professionals in the region of southern Denmark</article-title>. <source>Acta Ophthalmol</source>. (<year>2022</year>) <volume>100</volume>(<issue>5</issue>):<fpage>589</fpage>&#x2013;<lpage>95</lpage>. <pub-id pub-id-type="doi">10.1111/aos.15123</pub-id><pub-id pub-id-type="pmid">35277926</pub-id></mixed-citation></ref>
<ref id="B45"><label>45.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Borakati</surname> <given-names>A</given-names></name></person-group>. <article-title>Evaluation of an international medical E-learning course with natural language processing and machine learning</article-title>. <source>BMC Med Educ</source>. (<year>2021</year>) <volume>21</volume>:<fpage>181</fpage>. <pub-id pub-id-type="doi">10.1186/s12909-021-02609-8</pub-id><pub-id pub-id-type="pmid">33766037</pub-id></mixed-citation></ref>
<ref id="B46"><label>46.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gabr</surname> <given-names>AM</given-names></name> <name><surname>Li</surname> <given-names>N</given-names></name> <name><surname>Schenning</surname> <given-names>RC</given-names></name> <name><surname>Elbarbary</surname> <given-names>A</given-names></name> <name><surname>Anderson</surname> <given-names>JC</given-names></name> <name><surname>Kaufman</surname> <given-names>JA</given-names></name><etal/></person-group> <article-title>Diagnostic and interventional radiology case volume and education in the age of pandemics: impact analysis and potential future directions</article-title>. <source>Acad Radiol</source>. (<year>2020</year>) <volume>27</volume>(<issue>10</issue>):<fpage>1481</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1016/j.acra.2020.07.014</pub-id><pub-id pub-id-type="pmid">32703647</pub-id></mixed-citation></ref>
<ref id="B47"><label>47.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Me&#x015F;e</surname> <given-names>I</given-names></name> <name><surname>Alt&#x0131;nta&#x015F; Ta&#x015F;l&#x0131;&#x00E7;ay</surname> <given-names>C</given-names></name> <name><surname>Kuzan</surname> <given-names>BN</given-names></name> <name><surname>Kuzan</surname> <given-names>TY</given-names></name> <name><surname>Sivrio&#x011F;lu</surname> <given-names>AK</given-names></name></person-group>. <article-title>Educating the next generation of radiologists: a comparative report of ChatGPT and E-learning resources</article-title>. <source>Diagn Interv Radiol</source>. (<year>2024</year>) <volume>30</volume>:<fpage>163</fpage>&#x2013;<lpage>74</lpage>. <pub-id pub-id-type="doi">10.4274/dir.2023.232496</pub-id></mixed-citation></ref>
<ref id="B48"><label>48.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Singaram</surname> <given-names>VS</given-names></name> <name><surname>Pillay</surname> <given-names>R</given-names></name> <name><surname>Mbobnda Kapche</surname> <given-names>EL</given-names></name></person-group>. <article-title>Exploring the role of digital technology for feedback exchange in clinical training: a scoping review</article-title>. <source>Syst Rev</source>. (<year>2024</year>) <volume>13</volume>:<fpage>298</fpage>. <pub-id pub-id-type="doi">10.1186/s13643-024-02705-y</pub-id><pub-id pub-id-type="pmid">39633468</pub-id></mixed-citation></ref>
<ref id="B49"><label>49.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Awuah</surname> <given-names>WA</given-names></name> <name><surname>Adebusoye</surname> <given-names>FT</given-names></name> <name><surname>Wellington</surname> <given-names>J</given-names></name> <name><surname>David</surname> <given-names>L</given-names></name> <name><surname>Salam</surname> <given-names>A</given-names></name> <name><surname>Yee</surname> <given-names>ALW</given-names></name><etal/></person-group> <article-title>Recent outcomes and challenges of artificial intelligence, machine learning, and deep learning in neurosurgery</article-title>. <source>World Neurosurg X</source>. (<year>2024</year>) <volume>23</volume>:<fpage>100301</fpage>. <pub-id pub-id-type="doi">10.1016/j.wnsx.2024.100301</pub-id><pub-id pub-id-type="pmid">38577317</pub-id></mixed-citation></ref>
<ref id="B50"><label>50.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zavala Calahorrano</surname> <given-names>AM</given-names></name> <name><surname>Del Pozo S&#x00E1;nchez</surname> <given-names>G</given-names></name> <name><surname>Chaves Corral</surname> <given-names>KN</given-names></name></person-group>. <article-title>Systematic review MedTech, and artificial intelligence</article-title>. <source>Salud, Ciencia y Tecnolog&#x00ED;a - Serie de Conferencias</source>. (<year>2024</year>) <volume>3</volume>(<issue>0</issue>):<fpage>1</fpage>&#x2013;<lpage>10</lpage>. <comment>ISSN-e 2953-4860</comment>. <pub-id pub-id-type="doi">10.56294/sctconf2024789</pub-id></mixed-citation></ref>
<ref id="B51"><label>51.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Savage</surname> <given-names>M</given-names></name> <name><surname>Spence</surname> <given-names>A</given-names></name> <name><surname>Turbitt</surname> <given-names>L</given-names></name></person-group>. <article-title>The educational impact of technology-enhanced learning in regional anaesthesia: a scoping review</article-title>. <source>Br J Anaesth</source>. (<year>2024</year>) <volume>133</volume>(<issue>2</issue>):<fpage>400</fpage>&#x2013;<lpage>15</lpage>. <pub-id pub-id-type="doi">10.1016/j.bja.2024.04.045</pub-id><pub-id pub-id-type="pmid">38824073</pub-id></mixed-citation></ref>
<ref id="B52"><label>52.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>St Mart</surname> <given-names>J-P</given-names></name> <name><surname>Goh</surname> <given-names>EL</given-names></name> <name><surname>Liew</surname> <given-names>I</given-names></name> <name><surname>Shah</surname> <given-names>Z</given-names></name> <name><surname>Sinha</surname> <given-names>J</given-names></name></person-group>. <article-title>Artificial intelligence in orthopaedics surgery: transforming technological innovation in patient care and surgical training</article-title>. <source>Postgrad Med J</source>. (<year>2023</year>) <volume>99</volume>(<issue>1173</issue>):<fpage>687</fpage>&#x2013;<lpage>94</lpage>. <pub-id pub-id-type="doi">10.1136/postgradmedj-2022-141596</pub-id><pub-id pub-id-type="pmid">37389584</pub-id></mixed-citation></ref>
<ref id="B53"><label>53.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Antoniou</surname> <given-names>V</given-names></name> <name><surname>Gauhar</surname> <given-names>V</given-names></name> <name><surname>Kallidonis</surname> <given-names>P</given-names></name> <name><surname>Skolarikos</surname> <given-names>A</given-names></name> <name><surname>Veneziano</surname> <given-names>D</given-names></name> <name><surname>Liatsikos</surname> <given-names>E</given-names></name><etal/></person-group> <article-title>Education and training evolution in urolithiasis: a perspective from European school of urology</article-title>. <source>Asian J Urol</source>. (<year>2023</year>) <volume>10</volume>(<issue>3</issue>):<fpage>281</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1016/j.ajur.2023.01.004</pub-id><pub-id pub-id-type="pmid">37538161</pub-id></mixed-citation></ref>
<ref id="B54"><label>54.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ramamurthy</surname> <given-names>S</given-names></name> <name><surname>Yamamahi</surname> <given-names>SA</given-names></name> <name><surname>Abdul Rahim</surname> <given-names>A</given-names></name></person-group>. <article-title>The role of the metaverse in transforming healthcare</article-title>. <source>Res J Pharm Technol</source>. (<year>2023</year>) <volume>16</volume>(<issue>11</issue>):<fpage>5506</fpage>&#x2013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.52711/0974-360X.2023.00891</pub-id></mixed-citation></ref>
<ref id="B55"><label>55.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jaju</surname> <given-names>R</given-names></name> <name><surname>Saxena</surname> <given-names>M</given-names></name> <name><surname>Paliwal</surname> <given-names>N</given-names></name> <name><surname>Bihani</surname> <given-names>P</given-names></name> <name><surname>Tharu</surname> <given-names>V</given-names></name></person-group>. <article-title>&#x201C;The show must go on&#x201D;: aftermath of COVID-19 on anesthesiology residency programs</article-title>. <source>Saudi J Anaesth</source>. (<year>2022</year>) <volume>16</volume>(<issue>4</issue>):<fpage>452</fpage>&#x2013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.4103/sja.sja_563_22</pub-id><pub-id pub-id-type="pmid">36337394</pub-id></mixed-citation></ref>
<ref id="B56"><label>56.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tekke&#x015F;in</surname> <given-names>MS</given-names></name> <name><surname>Khurram</surname> <given-names>SA</given-names></name></person-group>. <article-title>A new dawn: the impact of digital technologies in oral and maxillofacial pathology</article-title>. <source>Deneysel ve Klinik T&#x0131;p Dergisi</source>. (<year>2021</year>) <volume>38</volume>(<issue>3s</issue>):<fpage>81</fpage>&#x2013;<lpage>5</lpage>. <pub-id pub-id-type="doi">10.52142/omujecm.38.si.dent.1</pub-id></mixed-citation></ref>
<ref id="B57"><label>57.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Finocchiaro</surname> <given-names>M</given-names></name> <name><surname>Cortegoso Valdivia</surname> <given-names>P</given-names></name> <name><surname>Hernansanz</surname> <given-names>A</given-names></name> <name><surname>Marino</surname> <given-names>N</given-names></name> <name><surname>Amram</surname> <given-names>D</given-names></name> <name><surname>Casals</surname> <given-names>A</given-names></name><etal/></person-group> <article-title>Training simulators for gastrointestinal endoscopy: current and future perspectives</article-title>. <source>Cancers (Basel)</source>. (<year>2021</year>) <volume>13</volume>:<fpage>1427</fpage>. <pub-id pub-id-type="doi">10.3390/cancers13061427</pub-id><pub-id pub-id-type="pmid">33804773</pub-id></mixed-citation></ref>
<ref id="B58"><label>58.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hoogenboom</surname> <given-names>SA</given-names></name> <name><surname>van Hooft</surname> <given-names>JE</given-names></name> <name><surname>Wallace</surname> <given-names>MB</given-names></name></person-group>. <article-title>Training for advanced endoscopic imaging in gastrointestinal diseases</article-title>. <source>Tech Innov Gastrointest Endosc</source>. (<year>2021</year>) <volume>23</volume>(<issue>1</issue>):<fpage>99</fpage>&#x2013;<lpage>106</lpage>. <pub-id pub-id-type="doi">10.1016/j.tige.2020.09.001</pub-id></mixed-citation></ref>
<ref id="B59"><label>59.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kranjcevic</surname> <given-names>N</given-names></name> <name><surname>Rodriguez</surname> <given-names>MA</given-names></name> <name><surname>Vazquez</surname> <given-names>E</given-names></name> <name><surname>Kupesic-Plavsic</surname> <given-names>S</given-names></name></person-group>. <article-title>Education, scholarship, academic, and public services during and after Corona crisis</article-title>. <source>Donald Sch J Ultrasound Obstet Gynecol</source>. (<year>2020</year>) <volume>14</volume>(<issue>3</issue>):<fpage>288</fpage>&#x2013;<lpage>95</lpage>. <pub-id pub-id-type="doi">10.5005/jp-journals-10009-1658</pub-id></mixed-citation></ref></ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by"><p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3128920/overview">Judith Francisco-P&#x00E9;rez</ext-link>, Pontificia Universidad Cat&#x00F3;lica del Ecuador, Ecuador</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by"><p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/566948/overview">Jennifer Marie Binzak Fugate</ext-link>, Kansas City University, Kansas City, United States</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3077468/overview">Aynura Karimova</ext-link>, Baku State University, Azerbaijan</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3298088/overview">Mohammed Messaoudi</ext-link>, Universit&#x00E9; Ahmed Draia Adrar, Algeria</p></fn>
</fn-group>
</back>
</article>