<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Educ.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Education</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Educ.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2504-284X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/feduc.2026.1761456</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Immersive equity: virtual reality and agentic artificial intelligence as catalysts for equity in education</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Chimbo</surname> <given-names>Bester</given-names></name>
<xref ref-type="aff" rid="aff1"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<uri xlink:href="https://loop.frontiersin.org/people/2687392"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Maguraushe</surname> <given-names>Kudakwashe</given-names></name>
<xref ref-type="aff" rid="aff1"/>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<uri xlink:href="https://loop.frontiersin.org/people/3007518"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Mutunhu Ndlovu</surname> <given-names>Belinda</given-names></name>
<xref ref-type="aff" rid="aff1"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<uri xlink:href="https://loop.frontiersin.org/people/2928755"/>
</contrib>
</contrib-group>
<aff id="aff1"><institution>School of Computing, University of South Africa</institution>, <city>Johannesburg</city>, <country country="ZA">South Africa</country></aff>
<author-notes>
<corresp id="c001"><label>&#x0002A;</label>Correspondence: Kudakwashe Maguraushe, <email xlink:href="mailto:magark@unisa.ac.za">magark@unisa.ac.za</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-10">
<day>10</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>11</volume>
<elocation-id>1761456</elocation-id>
<history>
<date date-type="received">
<day>05</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>09</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>19</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2026 Chimbo, Maguraushe and Mutunhu Ndlovu.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Chimbo, Maguraushe and Mutunhu Ndlovu</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-10">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Inclusive education advocates for removing structural barriers that may hinder learners with varying abilities from achieving equity in formal learning environments. Nevertheless, many students with special educational needs (SEN) face outright rejection due to rigid pedagogies, inaccessible content, and limited support, despite international commitments, such as sustainable development goal 4 (SDG4). New immersive technologies, especially virtual reality (VR) and AI, can really alter education from episodic accommodation toward empowerment with equal opportunity. Notwithstanding the improvements, the technologies are still often considered as separate domains, resulting in a lack of understanding of the integrated, agentic use of technologies that can promote educational equity for learners with special needs.</p></sec>
<sec>
<title>Methods</title>
<p>Using the PRISMA 2020 framework, searches were conducted across Scopus, Web of Science, and ERIC between 2020 and 2025. Sixteen empirical studies meeting the final inclusion criteria were analyzed to identify the pedagogical applications, learning outcomes, ethical challenges, and guiding frameworks. The SPIDER framework guided eligibility decisions, while methodological quality was assessed using the Mixed Methods Appraisal Tool (MMAT).</p></sec>
<sec>
<title>Results</title>
<p>The findings revealed that integrating a VR&#x02013;AI environment increases engagement, cognitive accuracy, affective safety, and socio-emotional development by fostering co-agency, where the intelligent system acts with its own agency alongside the learner, enabling adaptive difficulty, emotional regulation, and personalized pacing. Interventions generally yielded greater gains in learner motivation, confidence, and communication skills compared to conventional methods. On the other hand, the scale-up of these innovations continues to be hindered by infrastructural inequities, limited teacher readiness, and thorny ethical issues related to data privacy and algorithmic bias. However, cross-study comparisons reveal some important limitations: limited diversity, a lack of rigorous accessibility testing and data governance safeguards, no longitudinal validation, and inadequate evaluation frameworks for algorithmic bias and biometric data ethics.</p></sec>
<sec>
<title>Discussion</title>
<p>The study, drawing on insights, suggests the Integrated VR&#x02013;AI Equity Framework, which consolidates pedagogical, affective, accessibility, and ethical angles into a single model for equitable immersive inclusion in learning. In the end, educational equity must, in fact, be an endeavor toward compassionate design, ethical governance, and systemic support so that every learner can authentically feel a sense of belonging and dignity in intelligent learning environments.</p></sec></abstract>
<kwd-group>
<kwd>virtual reality</kwd>
<kwd>agentic artificial intelligence</kwd>
<kwd>equitable access</kwd>
<kwd>inclusive education</kwd>
<kwd>immersive technologies</kwd>
<kwd>equity</kwd>
<kwd>special educational needs</kwd>
<kwd>accessibility</kwd>
</kwd-group>
<funding-group>
  <funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement>
</funding-group>
<counts>
<fig-count count="5"/>
<table-count count="2"/>
<equation-count count="0"/>
<ref-count count="61"/>
<page-count count="16"/>
<word-count count="11871"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Special Educational Needs</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Inclusive education is built on the claim that all students, regardless of their disability, background, language, or cognitive characteristics, are entitled to equal access to good learning opportunities in places that recognize their identity and meet their needs (<xref ref-type="bibr" rid="B38">Prada et al., 2023</xref>; <xref ref-type="bibr" rid="B23">Maguraushe et al., 2025</xref>). However, the policy discourse, which appears progressive and advocates for such coexistence, has identified an implementation gap between stated intent and practice (<xref ref-type="bibr" rid="B42">Schuster, 2022</xref>; <xref ref-type="bibr" rid="B39">Quirke and Galvin, 2025</xref>; <xref ref-type="bibr" rid="B7">Cote and Banks, 2025</xref>). Until today, education systems maintain the invisibility norm that most learners think, behave, and process information similarly; therefore, in error, neurodivergent learners and those with physical, sensory, linguistic, or socio-economic barriers get excluded (<xref ref-type="bibr" rid="B38">Prada et al., 2023</xref>; <xref ref-type="bibr" rid="B5">Cook, 2024</xref>). The pandemic exposed yet another layer of division, revealing that digital participation is one thing with connectivity, but another with accessibility, freedom, and differentiated support (<xref ref-type="bibr" rid="B31">Ndlovu et al., 2023</xref>; <xref ref-type="bibr" rid="B2">Beaunoyer et al., 2020</xref>). Nonetheless, the current scholarship on immersive learning is found to be fragmentary. It largely focuses on virtual reality (VR) or artificial intelligence (AI) in isolation, with little synthesis that embraces the potential of VR combined with AI as an agentic system that can contribute to inclusive education through pedagogical, affective, and ethical inputs.</p>
<p>In response, an increasing number of scholars and practitioners advocate for pedagogies that discount normalization and celebrate neurodiversity, divergent embodiment, and multiple ways of knowing. Among other valid pedagogies, VR stands out as a strong alternative to traditional text-dominant pedagogy through the paradigm of embodied cognition, thereby providing more immersive and experiential learning that supports active problem-solving and social-emotional development (<xref ref-type="bibr" rid="B26">Mapfumo et al., 2024</xref>; <xref ref-type="bibr" rid="B24">Makransky and Petersen, 2021</xref>; <xref ref-type="bibr" rid="B36">Petersen et al., 2022</xref>). VR also plays a vital role in supporting students with special educational needs (SEN). In this study, SEN refers to learners with developmental, cognitive, communication, or behavioral differences, such as ASD, ADHD, intellectual disabilities, dyslexia, and motor impairments, who require adapted, accessible, or personalized instructional support, as reflected in the empirical studies reviewed (<xref ref-type="bibr" rid="B17">LaFountain, 2025</xref>). To ensure comparability across studies, SEN were defined according to UNESCO and UNCRPD classifications, including neurodevelopmental, cognitive, sensory, communication, and physical disabilities. Thus, for SEN students, numerous opportunities in VR facilitate safe interactions, make abstract ideas visible, and configure sensory settings to overcome enduring barriers that may be cognitive or physical (<xref ref-type="bibr" rid="B1">Ardai et al., 2022</xref>; <xref ref-type="bibr" rid="B53">Syafiq and Hakim, 2024</xref>). 
For example, autistic learners can rehearse communication scenarios without fear of judgment (<xref ref-type="bibr" rid="B53">Syafiq and Hakim, 2024</xref>; <xref ref-type="bibr" rid="B21">Lorenzo-Lled&#x000F3; et al., 2023</xref>; <xref ref-type="bibr" rid="B58">Wood, 2020</xref>), and individuals with dyslexia can benefit from multi-modal engagement with text that alleviates the strain of decoding (<xref ref-type="bibr" rid="B41">Roberts, 2022</xref>). Nevertheless, VR alone does not guarantee equity, as it can overwhelm the senses of individuals who are hypersensitive, distractible, or have fine-motor challenges without adequate scaffolding (<xref ref-type="bibr" rid="B8">Creed et al., 2024</xref>; <xref ref-type="bibr" rid="B33">Owen et al., 2024</xref>). Furthermore, VR cannot sufficiently improve equitable learning outcomes without intelligent mediation. Across the literature, VR environments appear to have strong motivational effects; however, without adaptive feedback, affective regulation, or personalized scaffolding, the cognitive, emotional, and accessibility outcomes of VR were quite inconclusive. That is where agentic artificial intelligence (Agentic AI) becomes important.</p>
<p>Historically, AI in education has been primarily used for administrative or evaluative purposes, thereby strengthening top-down structures of control and standardization (<xref ref-type="bibr" rid="B30">Ndlovu and Maguraushe, 2025</xref>; <xref ref-type="bibr" rid="B61">Zeng et al., 2025</xref>). This misalignment between immersion and personalized support reveals a critical pedagogical gap: immersive experiences must be paired with real-time adaptive intelligence to create sustained benefits for neurodivergent and diverse learners. Agentic AI is equipped with adaptive reasoning, emotion recognition, conversational feedback, and shared decision-making, enabling learners to take an active role alongside the agent in their ongoing development (<xref ref-type="bibr" rid="B61">Zeng et al., 2025</xref>; <xref ref-type="bibr" rid="B54">Tanprasert, 2025</xref>). In VR settings, Agentic AI can detect frustration, adjust task difficulty, support self-regulation, and compassionately nudge learners back into engagement (<xref ref-type="bibr" rid="B27">Maroju and Bhattacharya, 2025</xref>; <xref ref-type="bibr" rid="B19">Liu et al., 2025</xref>). AI should uphold human dignity rather than focusing narrowly on optimization (<xref ref-type="bibr" rid="B55">Teo, 2023</xref>). With such co-agency through VR immersion, coupled with AI-driven personalization, learners feel a sense that technologies work with them rather than on them (<xref ref-type="bibr" rid="B25">M&#x000E4;ntyoja and Hautala, 2025</xref>; <xref ref-type="bibr" rid="B14">Ionit&#x00103; et al., 2025</xref>). Such a human-centered partnership transforms immersive technology into a beacon for belonging, confidence, and educational equity (<xref ref-type="bibr" rid="B3">Boot and Hughes, 2025</xref>; <xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>).</p>
<p>Nevertheless, a notable uneven realization of the VR&#x02013;AI transformational potential has been observed (<xref ref-type="bibr" rid="B51">Strielkowski et al., 2025</xref>; <xref ref-type="bibr" rid="B9">Halkiopoulos and Gkintoni, 2024</xref>). Schools in the Global North have been experimenting with immersive inclusion, whereas schools serving disadvantaged communities face infrastructural, financial, and expertise-related constraints (<xref ref-type="bibr" rid="B45">Siddiqi, 2024</xref>; <xref ref-type="bibr" rid="B46">Singh et al., 2024</xref>). Innovations thus stand to increase inequalities they promise to tackle; for those who could most benefit from them remain the least likely to access them (<xref ref-type="bibr" rid="B43">Selwyn, 2022</xref>). Additionally, accessibility affordances are rarely assessed; the few assessments do not reference frameworks such as the universal design for learning (UDL) or the XAUR accessibility guidelines to inform their processes. This means that immersion alone does not suffice to instill inclusion. Another set of concerns related to emotion tracking, motion sensitivity, and AI decision-making bias is often mentioned but rarely empirically examined, thus putting vulnerable learners at risk (<xref ref-type="bibr" rid="B40">Register, 2024</xref>).</p>
<p>Additionally, current research is fragmented, as VR and AI are often studied in isolation; SEN populations remain underrepresented, and longitudinal studies remain scarce. As a result, a comprehensive understanding of how immersive technologies can provide meaningful support for autonomy, identity expression, socio-emotional growth, and learning over time remains lacking. Moreover, despite the rapid expansion of Agentic AI capable of adaptive reasoning, affective inference, and co-agency, there is no systematic review that consolidates conceptual evidence on how these systems function when embedded inside VR environments to support inclusion. Thus, several critical gaps in the literature remain unresolved. First, existing reviews on VR for SEN learners seldom analyze the role of AI-mediated adaptivity or emotion-aware scaffolding. Second, AI-in-education reviews almost entirely ignore immersive or embodied learning contexts. Third, ethical risks, including algorithmic bias, biometric data extraction, and accessibility requirements, remain unexamined, especially for vulnerable learners. Finally, the field lacks an integrated theoretical model explaining how VR&#x02013;AI co-agency advances or constrains educational equity. These gaps provide the rationale for the present review. The review investigates the following questions:</p>
<list list-type="order">
<list-item><p><italic>How are VR and Agentic AI being used pedagogically to support inclusion?</italic></p></list-item>
<list-item><p><italic>How effective are VR&#x02013;AI systems in enhancing learning and socio-emotional outcomes for learners with SEN?</italic></p></list-item>
<list-item><p><italic>How do learners, teachers, and other stakeholders perceive the integration of VR with AI?</italic></p></list-item>
<list-item><p><italic>What are the frameworks and models guiding the implementation of VR&#x02013;AIs into inclusive education?</italic></p></list-item>
<list-item><p><italic>What ethical and practical challenges deter the universal scaling of VR&#x02013;AI technologies for educational equity?</italic></p></list-item>
</list>
<p>By considering these questions, the article provides a comprehensive evidence base and proposes a new theoretical model: the Integrated VR&#x02013;AI Equity Framework, which offers an ethical, accessible, and learner-centered approach to the deployment of immersive technologies in diverse global contexts.</p>
<p>This study contributes by positioning VR and Agentic AI as a unified immersive ecosystem for advancing equity in inclusive education. It synthesizes evidence showing how VR&#x02013;AI co-agency enhances cognitive, affective, behavioral, and socio-emotional outcomes for diverse SEN learners, while also exposing persistent methodological, ethical, and accessibility gaps, particularly around short intervention durations, limited SEN representation, and weak AI transparency. The study introduces the <italic>Integrated VR&#x02013;AI Equity Framework</italic>, a novel model explaining how embodiment, adaptive intelligence, emotional regulation, and ethical governance interact to produce immersive equity. Finally, it sets a forward-looking research agenda, calling for longitudinal, cross-cultural, and fairness-focused immersive learning studies.</p></sec>
<sec sec-type="materials|methods" id="s2">
<label>2</label>
<title>Materials and methods</title>
<sec>
<label>2.1</label>
<title>Research design</title>
<p>The review was conducted in accordance with PRISMA 2020 guidelines (<xref ref-type="bibr" rid="B34">Page et al., 2021</xref>) to provide transparency and replicability, and used the SPIDER framework (Sample, Phenomenon of Interest, Design, Evaluation, and Research type) to achieve a balance between methodological rigor and the different types of evidence that are typical of inclusive education (<xref ref-type="bibr" rid="B6">Cooke et al., 2012</xref>). The SPIDER framework was selected because it accommodates the interpretive, design-oriented, and experiential elements typical of inclusive-education research, offering greater sensitivity than PICO for multimodal interventions involving VR, AI, and SEN populations. This design prioritized teaching and human experience outcomes. The methodological position is founded on humanistic and participatory values, recognizing that research with SEN populations needs to be very alert to voice, dignity, and autonomy (<xref ref-type="bibr" rid="B32">O&#x00027;Connor et al., 2023</xref>). Technology is not the primary focus of design; rather, the empowerment of learners and the socio-educational impact are given priority.</p>
</sec>
<sec>
<label>2.2</label>
<title>Search strategy</title>
<p>Three academic databases were systematically searched on 24 October 2025 to ensure comprehensive coverage of contemporary immersive-technology development: Scopus, Web of Science (WoS), and Education Resources Information Center (ERIC). Scopus and Web of Science were selected due to their extensive indexing of high-impact education, computer science, and interdisciplinary technology journals, while ERIC was included for its depth in pedagogical and special education scholarship. To capture the intersection of VR, Agentic AI, and inclusion, Boolean operators were applied to keywords relating to:</p>
<disp-quote><p>((&#x0201C;virtual realit<sup>&#x0002A;</sup>&#x0201D; OR &#x0201C;immersive VR&#x0201D;) AND (&#x0201C;Agentic artificial intelligence&#x0201D; OR &#x0201C;intelligent agent<sup>&#x0002A;</sup>&#x0201D; OR &#x0201C;Agentic AI&#x0201D; OR &#x0201C;Autonomous AI<sup>&#x0002A;</sup>&#x0201D; OR &#x0201C;Intelligent Agent&#x0201D; OR &#x0201C;Self-directed AI&#x0201D; OR &#x0201C;adaptive AI&#x0201D; OR &#x0201C;Cognitive Agent&#x0201D; OR &#x0201C;intelligent virtual agent&#x0201D;) AND (&#x0201C;inclusive education&#x0201D; OR &#x0201C;educational inclusion&#x0201D; OR &#x0201C;equity&#x0201D; OR &#x0201C;social justice&#x0201D; OR accessib<sup>&#x0002A;</sup> OR &#x0201C;universal design for learning&#x0201D; OR &#x0201C;UDL&#x0201D; OR &#x0201C;special education&#x0201D; OR &#x0201C;student<sup>&#x0002A;</sup> with disabilit<sup>&#x0002A;</sup>&#x0201D;) AND (&#x0201C;Immersive Experience&#x0201D; OR &#x0201C;Immersive Learning&#x0201D; OR &#x0201C;Immersive Technology&#x0201D;))</p></disp-quote>
<p>Search strings were adapted minimally for database-specific syntax while maintaining conceptual equivalence. The search spanned from January 2020 to January 2025, aligning with recent surges in immersive adoption following pandemic-driven digital transformation (<xref ref-type="bibr" rid="B16">Klimanov and Tretyak, 2024</xref>).</p>
</sec>
<sec>
<label>2.3</label>
<title>Inclusion criteria</title>
<p>The inclusion criteria include the following:</p>
<list list-type="bullet">
<list-item><p>Population: students with diverse needs, disabilities, or marginalized populations.</p></list-item>
<list-item><p>Context: focus on inclusive education, educational equity, accessibility, or diversity outcomes and not general educational effectiveness.</p></list-item>
<list-item><p>Intervention: Agentic AI and VR simultaneously in educational contexts.</p></list-item>
<list-item><p>Comparator: traditional pedagogical learning approaches.</p></list-item>
<list-item><p>Outcomes: measure relevant outcomes such as learning outcomes, engagement, accessibility, participation, autonomy in education, or social inclusion metrics.</p></list-item>
<list-item><p>Language: English</p></list-item>
<list-item><p>Year: 2020&#x02013;2025</p></list-item>
<list-item><p>Publication Status: final</p></list-item>
<list-item><p>Publication Type: peer-reviewed journals, conferences, and book chapters</p></list-item>
</list>
<p>The review deliberately restricted AI to agentic, adaptive, or affective systems because these forms of intelligence can respond in real time to learner behaviors, emotions, and cognitive load; an essential requirement for analyzing co-agency and equitable immersion.</p>
<p>Equally, the exclusion criteria include the following:</p>
<list list-type="bullet">
<list-item><p>Studies focusing on Augmented Reality (AR), Mixed Reality (MR), and extended Reality (XR).</p></list-item>
<list-item><p>Studies focusing on general Artificial Intelligence and not Agentic AI.</p></list-item>
<list-item><p>Gray literature, preprints, newspapers, commentary, reports, editorial, abstracts, opinion-only pieces. Gray literature and preprints were excluded to preserve methodological integrity, given the high degree of technical volatility and unreviewed claims in emerging VR&#x02013;AI works</p></list-item>
<list-item><p>Studies not focused on educational applications but healthcare, entertainment, military training, therapeutic functions without instructional intention or other non-educational contexts, AI limited to administrative analytics (non-agentic functions), Rehabilitation-only VR (no instructional context), as these do not constitute pedagogical applications relevant to equity in learning.</p></list-item>
<list-item><p>Non-English articles.</p></list-item>
<list-item><p>Studies before 2020.</p></list-item>
</list>
</sec>
<sec>
<label>2.4</label>
<title>Screening and eligibility</title>
<p>The initial search produced <italic>n</italic> = 144 records. After removing records for other reasons, including duplicates and language used (<italic>n</italic> = 3), a total of <italic>n</italic> = 141 remained for screening. The titles were screened for eligibility, and <italic>n</italic> = 94 records were excluded. Additionally, the abstracts were read, and <italic>n</italic> = 17 were eliminated. Out of the <italic>n</italic> = 30 remaining papers, <italic>n</italic> = 3 full-text papers could not be retrieved and were therefore eliminated. The remaining full texts of <italic>n</italic> = 27 papers were assessed on whether they answered the research questions posed, demonstrated sound methodological rigor, and had valid population samples. All screening stages: title, abstract, and full-text review were independently conducted by two reviewers, with disagreements resolved through discussion. Only 16 papers met the final inclusion criteria, and therefore, their study findings were extracted. This process is summarized in <xref ref-type="fig" rid="F1">Figure 1</xref>.</p>
<fig position="float" id="F1">
<label>Figure 1</label>
<caption><p>PRISMA workflow diagram. Adapted from <xref ref-type="bibr" rid="B34">Page et al. (2021)</xref>.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="feduc-11-1761456-g0001.tif">
<alt-text content-type="machine-generated">Flowchart illustrating the process of identifying studies via databases. Initial records identified: 144 from databases (89 Scopus, 17 Web of Science, 38 ERIC). Three records removed before screening. Records screened: 141. Exclusions: 94 due to title, 17 due to abstract. 30 reports sought; 3 not retrieved. Eligibility assessment: 27 reports. Exclusions: 7 could not answer research questions, 3 lacked methodological rigor, 1 had an invalid population sample. Sixteen studies included in the review.</alt-text>
</graphic>
</fig>
</sec>
<sec>
<label>2.5</label>
<title>Quality appraisal</title>
<p>To ensure inter-rater reliability, coding was performed independently by two reviewers, yielding 92% intercoder agreement. This high level of consensus arguably reflects shared familiarity with inclusive learning values and minimal interpretive drift in the appraisal of studies. Using an adapted MMAT (<xref ref-type="bibr" rid="B11">Hong et al., 2018a</xref>,<xref ref-type="bibr" rid="B12">b</xref>), a quality appraisal was performed to assess the methodology&#x00027;s integrity. Detailed itemized scoring for empirical studies was based on five specific criteria: research presentation, appropriateness of design, clarity of data collection and analysis, transparency, and applicability. Two independent reviewers assigned each study a quality rating of High, Moderate, or Low, and any discrepancies were resolved through a consensus process. In sum, nine studies received high-quality ratings, five were of moderate quality, and eight were considered low quality. This was largely due to a small sample size or lack of methodological transparency. Studies receiving low MMAT ratings were not excluded from the meta-synthesis; however, their results were interpreted with cautious weighting applied, especially when an effectiveness claim or broad generalizability was involved. The views of all studies, nonetheless, spurred further understanding of the context. This rigorous appraisal primarily enhanced credibility, dependability, and trustworthiness underpinning the collaborative integration of VR&#x02013;AI into inclusive education.</p>
</sec>
<sec>
<label>2.6</label>
<title>Data extraction</title>
<p>Data extraction captured variables such as the country of origin, methodology adopted, type of special need or context, pedagogical applications, and learning outcomes. Importantly, ethical and implementation challenges were analyzed as critical variables, not optional reporting additions, because immersive technologies can both empower and unintentionally marginalize learners who are neurodivergent, sensory-sensitive, or mobility-challenged, especially if poorly designed (<xref ref-type="bibr" rid="B35">Parong and Mayer, 2021</xref>).</p></sec>
</sec>
<sec sec-type="results" id="s3">
<label>3</label>
<title>Results</title>
<p>In presenting the results, this section moves beyond descriptive reporting to provide comparative, cross-study interpretations organized around geographical distribution, educational levels, methodologies adopted, reported sample sizes, SEN categories, inclusion foci, VR/immersive modalities, AI roles, and the outcomes emphasized. Where possible, patterns, inconsistencies, and divergences across studies are highlighted to establish a critical foundation for the subsequent discussion. It is important to note that the reviewed studies reflect varying degrees of AI agency, ranging from adaptive or assistive systems to more autonomous, emotion-aware agents; this variation is explicitly acknowledged in the synthesis to avoid conceptual overgeneralization.</p>
<sec>
<label>3.1</label>
<title>Geography and sectors</title>
<p>Spatially, the 16 empirical studies reflect a multi-regional but uneven distribution of immersive inclusion innovation, as depicted in <xref ref-type="fig" rid="F2">Figure 2</xref>. As confirmed by the geographic mapping, the United States and Canada lead adoption (31.25%) (<xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B10">Hamed et al., 2024</xref>; <xref ref-type="bibr" rid="B47">Smith et al., 2024</xref>; <xref ref-type="bibr" rid="B44">Shu and Gu, 2023</xref>; <xref ref-type="bibr" rid="B22">Lowell and Yan, 2024</xref>), followed by China (18.75%) (<xref ref-type="bibr" rid="B51">Strielkowski et al., 2025</xref>; <xref ref-type="bibr" rid="B13">Hutson and McGinley, 2023</xref>; <xref ref-type="bibr" rid="B60">Zahid Iqbal and Campbell, 2023</xref>), with the remaining contributions dispersed individually across the United Kingdom (<xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>), Spain (<xref ref-type="bibr" rid="B49">Song et al., 2023</xref>), India (<xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>), Ireland (<xref ref-type="bibr" rid="B52">Strousopoulos et al., 2024</xref>), Greece (<xref ref-type="bibr" rid="B22">Lowell and Yan, 2024</xref>), Taiwan (<xref ref-type="bibr" rid="B18">Liao et al., 2024</xref>), Malaysia (<xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>), and the United Arab Emirates (<xref ref-type="bibr" rid="B20">Lorenzo Lled&#x000F3;, 2025</xref>) (each 6.25%). The heavy bias toward North America and East Asia in the number of studies not only points to the differences in access to immersive technologies but also to the structural research bias that makes the findings less representative. This pattern suggests hopeful global adoption; however, the lack of empirical studies from Sub-Saharan Africa and Latin America points to the continued existence of hidden structural inequalities in access to immersive technologies. 
This creates a geographical knowledge gap that limits generalizability and raises concerns over the fairness of the global distribution of immersive learning innovations. If the imbalances are not addressed, the benefits of VR&#x02013;AI will be monopolized by the already affluent education systems, which is precisely the situation that contradicts the equity commitments of SDG 4 and the UNCRPD. Therefore, immersive education research is no longer just a Global North activity; however, it still depends on regional privilege, which means that generalizing the evidence should be contextually cautious and accompanied by targeted capacity building.</p>
<fig position="float" id="F2">
<label>Figure 2</label>
<caption><p>Geographic distribution.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="feduc-11-1761456-g0002.tif">
<alt-text content-type="machine-generated">Bar chart showing the frequency of countries chosen for an unspecified parameter. USA/Canada leads with a frequency of five, followed by China with three. Remaining countries, including Spain, Taiwan, India, Ireland, Greece, UAE, Malaysia, and UK, each have a frequency of one.</alt-text>
</graphic>
</fig>
</sec>
<sec>
<label>3.2</label>
<title>Educational levels</title>
<p>The studies included in the review presented a variety of educational levels, thereby capturing both the school and the higher education sectors. Several studies have been conducted in primary and secondary school settings, particularly those examining the development of motor coordination in children with specific learning disabilities (SLD) and the effectiveness of social communication interventions in various schools (<xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B49">Song et al., 2023</xref>; <xref ref-type="bibr" rid="B44">Shu and Gu, 2023</xref>). On the other hand, a significant amount of research has come from university or postgraduate education cases, for instance, occupational therapy training, English language learning, and testing experimental prototypes in university labs or immersive CAVE-based learning environments (<xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B51">Strielkowski et al., 2025</xref>; <xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>; <xref ref-type="bibr" rid="B18">Liao et al., 2024</xref>; <xref ref-type="bibr" rid="B13">Hutson and McGinley, 2023</xref>; <xref ref-type="bibr" rid="B60">Zahid Iqbal and Campbell, 2023</xref>; <xref ref-type="bibr" rid="B52">Strousopoulos et al., 2024</xref>; <xref ref-type="bibr" rid="B22">Lowell and Yan, 2024</xref>). Studies conducted in higher education demonstrated more mature and technically sophisticated VR&#x02013;AI implementations compared to school-level interventions, which were generally smaller in scale and more exploratory. This stratification suggests that infrastructure and research capacity strongly influence the types of immersive inclusion models that can be deployed across sectors. 
This distribution suggests that the early adoption of integrated VR&#x02013;AI technologies has been most pronounced within higher education institutions, where research capacity and technical infrastructure are more readily available. Nonetheless, there is growing, albeit smaller-scale, application in school-age SEN contexts, demonstrating expanding inclusivity across educational tiers.</p>
</sec>
<sec>
<label>3.3</label>
<title>Methodologies</title>
<p>The study also analyzed the various methodologies used within the included studies, examining how the convergence of VR and Agentic AI can reimagine the possibilities of inclusive education, as depicted in <xref ref-type="fig" rid="F3">Figure 3</xref>.</p>
<fig position="float" id="F3">
<label>Figure 3</label>
<caption><p>Summary of methodologies adopted.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="feduc-11-1761456-g0003.tif">
<alt-text content-type="machine-generated">Pie chart displaying research methods with the following distribution: Experimental/Quasi-Experimental at 31%, Prototype/Usability Testing at 25%, Architecture plus User Testing at 25%, Mixed Methods at 13%, and Qualitative Multi-site at 6%.</alt-text>
</graphic>
</fig>
<p>The 16 empirical studies reviewed demonstrated a wide range of methodological diversity, reflecting the interdisciplinary nature of immersive educational research. Several studies employed quasi-experimental or experimental designs to examine SLD motor learning (<xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>), ASD social skills training (<xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>), smartphone-based VR prototypes (<xref ref-type="bibr" rid="B48">Sonawane et al., 2024</xref>), and GPT-assisted virtual classrooms (<xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>). Experimental and quasi-experimental studies have consistently produced stronger evidence of effectiveness, particularly for motor skills training and cognitive load reduction. Others used mixed methods, integrating quantitative measures and qualitative reflections, as in <xref ref-type="bibr" rid="B59">Yeganeh et al. (2025)</xref> and <xref ref-type="bibr" rid="B29">Mokmin and Ridzuan (2023)</xref>, which evaluated inclusive language learning in metaverse settings. Collaborative qualitative multi-site studies, including that of <xref ref-type="bibr" rid="B37">Piki et al. (2022)</xref>, explored social-emotional and pedagogical engagement. These qualitative and prototype studies contributed richer insights into emotional safety, learner experience, and co-agency dynamics. Complementing these were design-based usability investigations that iteratively refined immersive systems through user testing, including the CAVE teaching assistant with agentic support (<xref ref-type="bibr" rid="B15">Jia et al., 2024</xref>). Notably, none of the included studies employed longitudinal designs, and very few provided follow-up assessments, which limits understanding of the durability of VR&#x02013;AI benefits once novelty effects diminish. While methodological innovation is clearly progressing, longitudinal validation remains scarce. 
Thus, methodological heterogeneity strengthens the field but also complicates direct comparison across outcome domains.</p>
</sec>
<sec>
<label>3.4</label>
<title>Learning outcomes domains</title>
<p>Importantly, although all included studies involved some form of intelligent support, not all systems operated with the same degree of autonomy. Many applications relied on assistive or adaptive mechanisms that guided learners through predefined pathways or rule-based adjustments (<xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>), whereas fewer studies described movement toward genuine pedagogical co-agency, where the system interpreted learner states, initiated instructional adjustments, and engaged learners through more autonomous interaction (<xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B51">Strielkowski et al., 2025</xref>; <xref ref-type="bibr" rid="B48">Sonawane et al., 2024</xref>; <xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>). <xref ref-type="fig" rid="F4">Figure 4</xref> summarizes the reported learning outcome domains.</p>
<fig position="float" id="F4">
<label>Figure 4</label>
<caption><p>Frequency of the learning outcomes domains.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="feduc-11-1761456-g0004.tif">
<alt-text content-type="machine-generated">Bar chart on a dark background showing various skills. The bars represent engagement and motivation (12), cognitive performance and accuracy (8), social communication skills (6), motor skill development (2), and confidence/affective safety (6). Bars are blue and labeled with numbers at the top.</alt-text>
</graphic>
</fig>
<p>The analysis of the 16 empirical papers shows that 12 studies explicitly measured engagement and motivation (<xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>; <xref ref-type="bibr" rid="B18">Liao et al., 2024</xref>; <xref ref-type="bibr" rid="B20">Lorenzo Lled&#x000F3;, 2025</xref>; <xref ref-type="bibr" rid="B49">Song et al., 2023</xref>; <xref ref-type="bibr" rid="B13">Hutson and McGinley, 2023</xref>; <xref ref-type="bibr" rid="B10">Hamed et al., 2024</xref>; <xref ref-type="bibr" rid="B47">Smith et al., 2024</xref>; <xref ref-type="bibr" rid="B44">Shu and Gu, 2023</xref>; <xref ref-type="bibr" rid="B60">Zahid Iqbal and Campbell, 2023</xref>), often linking these outcomes to immersive presence, learner satisfaction, or enjoyment. Moreover, eight studies examined cognitive performance and accuracy (<xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>; <xref ref-type="bibr" rid="B18">Liao et al., 2024</xref>; <xref ref-type="bibr" rid="B20">Lorenzo Lled&#x000F3;, 2025</xref>; <xref ref-type="bibr" rid="B13">Hutson and McGinley, 2023</xref>; <xref ref-type="bibr" rid="B60">Zahid Iqbal and Campbell, 2023</xref>; <xref ref-type="bibr" rid="B52">Strousopoulos et al., 2024</xref>) through the lenses of memory retention, problem-solving, and concept understanding in immersive contexts. 
In addition, six studies have focused on social-communication skills (<xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>; <xref ref-type="bibr" rid="B18">Liao et al., 2024</xref>; <xref ref-type="bibr" rid="B20">Lorenzo Lled&#x000F3;, 2025</xref>; <xref ref-type="bibr" rid="B13">Hutson and McGinley, 2023</xref>; <xref ref-type="bibr" rid="B60">Zahid Iqbal and Campbell, 2023</xref>; <xref ref-type="bibr" rid="B52">Strousopoulos et al., 2024</xref>), specifically among learners with autism or communication difficulties. Motor-skill development was the subject of two studies (<xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B49">Song et al., 2023</xref>), which focused primarily on primary-level interventions for students with SLDs, thus indicating an under-explored domain. Finally, six papers addressed confidence and affective safety (<xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>; <xref ref-type="bibr" rid="B20">Lorenzo Lled&#x000F3;, 2025</xref>; <xref ref-type="bibr" rid="B47">Smith et al., 2024</xref>; <xref ref-type="bibr" rid="B44">Shu and Gu, 2023</xref>), showing that agentic AI can reduce anxiety and increase self-efficacy in virtual environments. All the findings presented above indicate that the integration of VR and AI has a strong positive impact on engagement and motivation. However, evidence for deeper cognitive or psychomotor outcomes is relatively limited, thus calling for longitudinal, multi-domain validations across all educational levels. However, pedagogical benefits were highly dependent on the AI agent&#x00027;s sophistication. 
Studies using basic rule-based agents or non-adaptive scripts showed significantly weaker outcomes, demonstrating that &#x02018;VR alone&#x02019; is insufficient without meaningful agentic intelligence.</p>
</sec>
<sec>
<label>3.5</label>
<title>Pedagogical applications of integrated VR and Agentic AI in inclusive learning environments</title>
<p>The analysis of the 16 papers also revealed the pedagogical applications of integrated VR and Agentic AI in inclusive learning environments. These were summarized in <xref ref-type="table" rid="T1">Table 1</xref>.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Pedagogical applications of integrated VR and Agentic AI in inclusive learning environments.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Pedagogical application category</bold></th>
<th valign="top" align="left"><bold>Outcome source</bold></th>
<th valign="top" align="left"><bold>Description</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">1. Adaptive learning and personalized instruction</td>
<td valign="top" align="left"><xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>; <xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>; <xref ref-type="bibr" rid="B50">Sripan and Jeerapattanatorn, 2025</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref></td>
<td valign="top" align="left">AI agents tailored task complexity and feedback in real time, supporting differentiated learning and inclusivity for diverse cognitive profiles</td>
</tr>
<tr>
<td valign="top" align="left">2. Immersive simulation for experiential learning</td>
<td valign="top" align="left"><xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B50">Sripan and Jeerapattanatorn, 2025</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>; <xref ref-type="bibr" rid="B18">Liao et al., 2024</xref></td>
<td valign="top" align="left">Immersive VR scenarios simulated real-world contexts, enabling the practice of abstract concepts and life skills within safe, controlled environments</td>
</tr>
<tr>
<td valign="top" align="left">3. Emotion-aware tutoring and affective support</td>
<td valign="top" align="left"><xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>; <xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>; <xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref></td>
<td valign="top" align="left">Agentic AI recognized cues of frustration and anxiety, providing emotional scaffolding that enhanced confidence and learning persistence</td>
</tr>
<tr>
<td valign="top" align="left">4. Collaborative and social learning environments</td>
<td valign="top" align="left"><xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>; <xref ref-type="bibr" rid="B50">Sripan and Jeerapattanatorn, 2025</xref>; <xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B18">Liao et al., 2024</xref></td>
<td valign="top" align="left">Shared VR spaces supported peer interaction and empathy building, promoting inclusion through digital collaboration and co-agency</td>
</tr>
<tr>
<td valign="top" align="left">5. Cognitive load management and metacognitive support</td>
<td valign="top" align="left"><xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>; <xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref></td>
<td valign="top" align="left">AI-managed task complexity, pacing, and feedback reduced overload and enhanced learner metacognition and reflection</td>
</tr>
<tr>
<td valign="top" align="left">6. Gamification and motivational scaffolding</td>
<td valign="top" align="left"><xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>; <xref ref-type="bibr" rid="B50">Sripan and Jeerapattanatorn, 2025</xref>; <xref ref-type="bibr" rid="B18">Liao et al., 2024</xref></td>
<td valign="top" align="left">Game mechanics integrated with adaptive AI increased motivation, sustained engagement, and supported achievement through incremental challenge-reward cycles</td>
</tr>
<tr>
<td valign="top" align="left">7. Multisensory and cross-modal learning design</td>
<td valign="top" align="left"><xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>; <xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B18">Liao et al., 2024</xref></td>
<td valign="top" align="left">Haptic, auditory, and visual stimuli allowed flexible multisensory engagement, aligning with universal design for learning principles</td>
</tr>
<tr>
<td valign="top" align="left">8. Independent and self-regulated learning</td>
<td valign="top" align="left"><xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>; <xref ref-type="bibr" rid="B50">Sripan and Jeerapattanatorn, 2025</xref>; <xref ref-type="bibr" rid="B18">Liao et al., 2024</xref></td>
<td valign="top" align="left">Agentic AI fosters autonomy by enabling learners to monitor their progress, adjust the difficulty level, and reflect independently</td>
</tr>
<tr>
<td valign="top" align="left">9. Inclusive assessment and feedback systems</td>
<td valign="top" align="left"><xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>; <xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref></td>
<td valign="top" align="left">AI-enabled adaptive assessments and personalized feedback improved inclusivity by recognizing diverse performance indicators beyond standardized grading</td>
</tr>
<tr>
<td valign="top" align="left">10. Sensory-safe and anxiety-reduced learning environments</td>
<td valign="top" align="left"><xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref></td>
<td valign="top" align="left">Controlled immersive contexts reduce overstimulation, creating emotionally safe spaces for SEN learners who are sensitive to sensory input</td>
</tr></tbody>
</table>
</table-wrap>
<p>The integration of VR and Agentic AI for pedagogical purposes demonstrated a complex ecosystem of adaptive, affective, and multisensory learning affordances. Most of the research relied on personalization and scaffolding that were sensitive to learners&#x00027; emotions, demonstrating how the AI agents would adjust task difficulty and emotional tone to meet the learners&#x00027; needs. Immersive simulations enabled mastery, while metaverse classrooms offered opportunities for collaboration, social acceptance, and co-agency. Several studies have placed a UDL-aligned multisensory design at the forefront, thereby catering to a wide range of cognitive and physical abilities. Across studies, adaptive personalization and emotion-aware support emerged as the most effective mechanisms. Applications that integrated real-time feedback loops, such as adaptive pacing or frustration detection, produced higher reported gains in learning outcomes than static VR simulations.</p>
</sec>
<sec>
<label>3.6</label>
<title>Challenges in scaling VR and Agentic AI</title>
<p>The most common barriers are infrastructure disparities and teachers&#x00027; lack of preparedness, which have a greater impact on integration success than the sophistication of the hardware. The persistent accessibility gap indicates that many immersive systems remain far from compliance with UDL principles, while ethical and algorithmic dilemmas highlight broader risks of bias, surveillance, and exclusion. Moreover, misalignment between finances and policies undermines sustainability, leaving promising prototypes at the pilot stage. Therefore, the learners&#x00027; struggle has become evident, necessitating adaptive pacing and ergonomic design. Challenges were not evenly distributed across contexts: resource-constrained settings primarily reported hardware, cost, and connectivity issues as the main obstacles, whereas more affluent areas primarily cited ethical issues, data governance, and teacher preparedness. Among the studies that mentioned data governance, four (<xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>; <xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>) discussed the governance of biometric data. No equity or bias checks were performed on AI decision-making, indicating a significant empirical blind spot given the vulnerable position of SEN populations. It seems that the aspects of the implementation process that hinder progress are more closely linked to the context than to neutrality. Furthermore, the results suggest that genuine equity comprises infrastructural support, inclusive teaching, open and fair governance, and ethical responsibility. It is not just a highly desirable goal of sophisticated technology. These were summarized in <xref ref-type="table" rid="T2">Table 2</xref>.</p>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Challenges in scaling VR and Agentic AI for educational equity.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th valign="top" align="left"><bold>Challenge category</bold></th>
<th valign="top" align="left"><bold>Outcome source</bold></th>
<th valign="top" align="left"><bold>Description</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">1. Infrastructure and resource limitations</td>
<td valign="top" align="left"><xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>; <xref ref-type="bibr" rid="B50">Sripan and Jeerapattanatorn, 2025</xref>; <xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>; <xref ref-type="bibr" rid="B18">Liao et al., 2024</xref></td>
<td valign="top" align="left">High hardware costs, bandwidth instability, and limited technical support restrict equitable access and long-term scalability, especially in resource-constrained contexts</td>
</tr>
<tr>
<td valign="top" align="left">2. Educator capacity and pedagogical readiness</td>
<td valign="top" align="left"><xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>; <xref ref-type="bibr" rid="B18">Liao et al., 2024</xref></td>
<td valign="top" align="left">Insufficient training in immersive pedagogy undermines the quality of integration and hinders the meaningful application of VR&#x02013;AI tools in classroom practice</td>
</tr>
<tr>
<td valign="top" align="left">3. Accessibility and inclusive design gaps</td>
<td valign="top" align="left"><xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>; <xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>; <xref ref-type="bibr" rid="B18">Liao et al., 2024</xref></td>
<td valign="top" align="left">Many systems neglect UDL principles, leading to sensory, physical, or cognitive exclusion for learners with disabilities</td>
</tr>
<tr>
<td valign="top" align="left">4. Ethical and data privacy concerns</td>
<td valign="top" align="left"><xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>; <xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>; <xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref></td>
<td valign="top" align="left">Unclear policies for the use of emotional and biometric data risk learner surveillance, bias, and privacy violations</td>
</tr>
<tr>
<td valign="top" align="left">5. Algorithmic bias and transparency issues</td>
<td valign="top" align="left"><xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>; <xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref></td>
<td valign="top" align="left">Black-box AI decision-making reduces accountability, with bias potentially reinforcing inequities among neurodiverse and minority learners</td>
</tr>
<tr>
<td valign="top" align="left">6. Financial and policy sustainability</td>
<td valign="top" align="left"><xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref></td>
<td valign="top" align="left">High implementation costs and the absence of formal policy frameworks hinder national-level scalability and long-term sustainability</td>
</tr>
<tr>
<td valign="top" align="left">7. Learner fatigue and cognitive overload</td>
<td valign="top" align="left"><xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref></td>
<td valign="top" align="left">Extended immersion without adaptive pacing can induce fatigue and sensory overload, reducing the inclusive benefits of VR&#x02013;AI environments</td>
</tr></tbody>
</table>
</table-wrap>
<p>Taken holistically, the findings indicate strong pedagogical potential for the collaboration between VR and AI as co-agents; they also show the major flaws in the quality of the evidence, accessibility practices, geographic representation, and ethical evaluation. Such patterns across different areas of study form the basis for the ensuing discussion, which examines the role of VR&#x02013;AI integration in enhancing or limiting inclusiveness and equity of learning in varied educational settings.</p></sec>
</sec>
<sec sec-type="discussion" id="s4">
<label>4</label>
<title>Discussion</title>
<p>The intersection of VR and Agentic AI in inclusive education has generated a growing body of evidence, represented here by 16 empirical studies. Collectively, these studies suggest that immersive and intelligent systems do not operate in isolation; rather, their educational value is shaped by pedagogical intentionality, contextual readiness, and ethical sensitivity. Accordingly, this discussion moves beyond description to provide a theory-informed critical synthesis of how VR&#x02013;AI co-agency functions across pedagogy, affect, and ethics. By foregrounding contradictions, methodological gaps, and structural inequities, we argue that immersive inclusion is a double-edged proposition: it can be a genuine pedagogical opportunity, yet it remains an unsettled systemic challenge. The discussion is organized around four interrelated themes: (1) transformative learning outcomes through immersive co-agency, (2) pedagogical evolution and instructional design innovation, (3) challenges and systemic constraints in scaling for equity, and (4) theoretical and framework convergence toward inclusive immersion.</p>
<sec>
<label>4.1</label>
<title>Transformative learning outcomes through immersive co-agency</title>
<p>Across the reviewed studies, VR combined with Agentic AI offered strong leverage for engaging learners and strengthening motivation and emotional investment. Learners reported more memorable experiences when virtual environments responded dynamically to their actions and affective states (<xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>; <xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>). This responsiveness reflected co-agency, whereby the system functioned as an adaptive partner, shaping personalized timing, difficulty, and context-sensitive feedback. Such interaction resonates with constructivist principles, which emphasize meaning-making through active experience (<xref ref-type="bibr" rid="B57">Vygotsky, 1978</xref>).</p>
<p>Engagement and motivation emerged as the most frequently reported outcome, reinforcing the argument that agency can serve as a precursor to presence and resonance (<xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>). Nevertheless, the studies also indicate that fascination alone is insufficient: durable educational gains depend on structured scaffolding and reflective debriefing (<xref ref-type="bibr" rid="B24">Makransky and Petersen, 2021</xref>). Without these pedagogic anchors, immersion risks remaining superficial.</p>
<p>Beyond engagement, several studies have reported improvements in cognitive performance, accuracy, and effectiveness when adaptive agents regulated cognitive load in real time (<xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>). By monitoring user interaction, agents adjusted task difficulty and stimulus intensity to sustain concentration and support retention. This aligns with refined CLT accounts for immersive contexts, in which the agent assumes a hybrid role between challenge and comfort, reducing overload while still sustaining productive effort. Notably, this adaptivity appeared particularly valuable for neurodivergent learners who often experience cognitive fatigue in traditional classrooms.</p>
<p>Social-communication gains also emerged, with VR&#x02013;AI environments functioning as preparatory spaces for learners with ASD and social anxiety. Studies reported that avatar-based interaction reduced fear of judgment and enabled learners to practice dialogue and empathy in psychologically safer conditions (<xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>). Similarly, increased confidence and affective safety were reported when socially aware AI tutors supported emotional regulation and reduced anxiety during difficult tasks (<xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>).</p>
<p>However, immersive co-agency also carries limits. Although less common, <xref ref-type="bibr" rid="B35">Parong and Mayer (2021)</xref> suggested that fine motor coordination could be disrupted for learners with physical or perceptual difficulties, indicating that embodied interaction may not be universally enabling without careful calibration. Moreover, benefits depended heavily on the sophistication of the AI: scripted or rule-based agents tended to yield weaker and shorter-lived gains than adaptive, emotion-aware systems. This reinforces a central inference across the evidence base: immersion alone is insufficient without real-time pedagogical intelligence.</p>
<p>The studies analyzed have shown that VR coupled with Agentic AI provides strong pedagogical leverage for engaging learners and fostering their motivation and emotional investment. Students reported more memorable experiences when virtual environments dynamically responded to their actions and emotional states (<xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>; <xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>). Such responsiveness paralleled agentic co-agency and thus an aura of companionship between the learner and the system, which, in turn, facilitated personalized timing, adaptive difficulty, and context-sensitive feedback. It enabled interactive, self-directed learning, thus adhering to constructivist principles, which emphasize the creation of understanding through active experiences (<xref ref-type="bibr" rid="B57">Vygotsky, 1978</xref>).</p>
<p>The findings indicate that agentic AI in immersive learning should be interpreted as an emergent capability rather than a consistently realized condition across the evidence base. While some studies suggest more autonomous, emotion-aware pedagogical agents capable of co-regulating learner experience (<xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B51">Strielkowski et al., 2025</xref>; <xref ref-type="bibr" rid="B48">Sonawane et al., 2024</xref>; <xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>), a substantial portion of the literature reflects transitional systems that remain primarily assistive or adaptively responsive (<xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>). Consequently, immersive co-agency is better framed as a developmental trajectory, in which adaptive support can serve as a pragmatic precursor to more advanced learner-AI partnership when guided by pedagogical intentionality and reflective scaffolding (<xref ref-type="bibr" rid="B24">Makransky and Petersen, 2021</xref>; <xref ref-type="bibr" rid="B57">Vygotsky, 1978</xref>).</p>
</sec>
<sec>
<label>4.2</label>
<title>Pedagogical evolution and instructional design innovation</title>
<p>The second theme concerns the pedagogical shifts underpinning immersive inclusion. The results identified 10 broad categories of pedagogical applications, reflecting an emerging move toward holistic, personalized learning design. Dominant applications included adaptive learning, experiential simulation, and emotion-aware tutoring, collectively forming a triadic relationship among the learner, the agent, and the environment. In this configuration, AI does not merely deliver content; it increasingly acts as a responsive learning partner. Consequently, teacher-centered models are challenged by new questions of shared agency, autonomy, and trust.</p>
<p>Adaptive learning and personalized instruction have emerged as foundational components across many interventions, with AI agents adjusting difficulty, response timing, and sensory input dynamically (<xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>; <xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>). Such adaptivity, historically the domain of teachers, can now scale across learning contexts, yet only if agentic responsiveness is transparent and ethically sustained.</p>
<p>Immersive simulation similarly strengthened experiential learning, with studies showing that metaverse classrooms and scenario-based tasks supported the practice of abstract concepts, vocational skills, and social behaviors in realistic yet controlled environments (<xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>). This aligns with experiential learning traditions associated with Kolb, where reflective engagement with simulated experience consolidates understanding.</p>
<p>Emotion-aware tutoring was also prominent in enabling inclusion (<xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>; <xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>). Through affective computing, including behavioral tracking and facial recognition, agents detected signs of frustration or disengagement and adjusted pacing accordingly. This emotional synchrony shifts AI from purely cognitive machinery to socio-emotional companionship, particularly relevant for SEN learners who rely on predictable, empathetic feedback.</p>
<p>Finally, multisensory and cross-modal design was repeatedly framed as a practical pathway to inclusion. Haptics, audio design, and flexible visual affordances expanded access for learners with sensory impairments and supported embodied cognition (<xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>). Autonomous mastery learning further strengthened inclusion by enabling learners to regulate progression through metacognitive feedback loops (<xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>). In combination, these pedagogical adaptations suggest a substantive reframing of inclusion: from standardization and placement toward personalized participation.</p>
</sec>
<sec>
<label>4.3</label>
<title>Challenges and systemic constraints in scaling for equity</title>
<p>A consistent pattern across studies is that barriers to immersive innovation are not evenly distributed. In resource-rich contexts, the dominant concerns involved ethical governance, data protection, and teacher preparedness; in resource-constrained contexts, challenges begin with access itself. Studies from Malaysia, India, and the UAE pointed to equipment costs, network instability, and maintenance burdens that limit implementation, particularly in public or rural schooling contexts (<xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>). Consequently, much of the empirical literature reflects privileged environments, raising epistemic equity concerns about whose realities are represented in this emerging evidence base. These disparities indicate that immersive equity is not only a technical question but a structural justice issue shaped by unequal infrastructure, funding, and policy support.</p>
<p>In addition, pedagogical readiness often lagged behind technical capability. Several studies reported that teachers lacked the professional understanding and pedagogical frameworks needed to integrate immersive systems meaningfully (<xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>). Without structured implementation and intentional instructional design, VR&#x02013;AI risks becoming novelty rather than pedagogy. Accessibility gaps further complicate this reality: while many systems are described as inclusive, few demonstrate compliance with UDL in ways that substantively include learners with visual, auditory, or mobility impairments, who remain underrepresented (<xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>).</p>
<p>Ethical risks associated with affective and biometric data also remain unresolved. Studies have flagged unclear data management practices, opaque algorithmic decision making, and potential biases that could amplify exclusion (<xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>; <xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>). However, despite frequent mention of affective AI, formal bias audits and transparent reporting of biometric retention, processing, or consent mechanisms were largely absent, an omission that is particularly concerning given the vulnerability of SEN populations and the sensitivity of behavioral-emotion data.</p>
<p>Finally, learner fatigue and sensory overload emerged as subtle but important constraints. While most studies reported gains in engagement and cognitive accuracy, two studies documented increased sensory overload and cognitive fatigue under prolonged immersion, indicating that co-agency can either mitigate or intensify cognitive load depending on pacing and design quality (<xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>). These patterns align with CAMIL, which emphasizes that deep learning arises when immersive presence is supported by emotional regulation; agentic AI potentially operationalizes this by modulating cognitive load, suggesting convergence between CAMIL and adaptive tutoring models.</p>
<p>Relatedly, multiple studies reported that learners struggled when interfaces were fragmented or agent behavior was inconsistent, highlighting that co-agency requires stability and predictability to support inclusion. In particular, <xref ref-type="bibr" rid="B37">Piki et al. (2022)</xref> noted that disruptions in interface continuity undermined peer collaboration, <xref ref-type="bibr" rid="B51">Strielkowski et al. (2025)</xref> reported that inconsistent cues from the CAVE-based NivTA assistant confused learners relying on routine, and <xref ref-type="bibr" rid="B59">Yeganeh et al. (2025)</xref> described interaction delays that disproportionately affected learners who needed structured predictability for emotional regulation. Taken together, these findings suggest that immersive learning is most effective when intelligent interaction is coherent and rhythmically paced, especially for neurodivergent learners. They also extend socio-constructivist theory by illustrating that meaning can be co-constructed not only among learners and peers, but also between learners and intelligent virtual agents.</p>
</sec>
<sec>
<label>4.4</label>
<title>Theoretical and framework convergence toward inclusive immersion</title>
<p>The review identified 10 frameworks and models that propose ethical and pedagogical pathways for immersive intelligence. UDL remains foundational, foregrounding sensory access and flexibility of representation (<xref ref-type="bibr" rid="B28">Mohamed et al., 2025</xref>). Studies applying UDL principles were positioned as moving beyond access toward agency and participation (<xref ref-type="bibr" rid="B3">Boot and Hughes, 2025</xref>; <xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>). In parallel, the Meta-MILE model (<xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>) conceptualizes the metaverse as a multi-layered inclusive environment for active learning, flexible AI, and co-presence. The Agentic Presence and Co-Agency Model (<xref ref-type="bibr" rid="B48">Sonawane et al., 2024</xref>) and Emotionally Intelligent Pedagogical Agent (EIPA) (<xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>) position AI as an active social actor that mediates interaction, shifting the conceptual emphasis from automation toward partnership and ethical companionship; an especially consequential move for inclusive education.</p>
<p>Learning theory also informed design choices. Refined CLT foregrounded task sequencing and flow, whereas Embodied Learning Theory legitimized bodily cognition for SEN learners requiring physical interaction (<xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>). Socio-constructivist assumptions were similarly extended in AI-mediated environments, where knowledge is co-created through interaction and collaboration (<xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>). The Hybrid Immersive Pedagogy (HIP) (<xref ref-type="bibr" rid="B50">Sripan and Jeerapattanatorn, 2025</xref>) and Adaptive Metacognitive Scaffolding (AMS) (<xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>) models further emphasized reflective learning and self-regulated mastery as outcomes of AI-supported autonomy.</p>
<p>Overall, the review demonstrates that immersive inclusion requires integrating sensory, cognitive, emotional, and ethical dimensions into a multi-layered foundation synthesized from UDL, CAMIL, socio-constructivism, and agentic-presence models. While many frameworks treat these dimensions separately, the reviewed evidence suggests their interdependence, thereby motivating the development of a more comprehensive theory of immersive equity.</p>
<p>It is important to clarify that the Integrated VR&#x02013;AI Equity Framework proposed in this study is inductively grounded in the empirical patterns identified across the sixteen reviewed studies, rather than imposed <italic>a priori</italic>. Core dimensions of the framework, namely pedagogical co-agency, learning outcome domains, ethical and implementation challenges, and integration frameworks, emerged consistently from a cross-study synthesis of immersive interventions employing varying degrees of adaptive and agentic AI (<xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B51">Strielkowski et al., 2025</xref>; <xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>; <xref ref-type="bibr" rid="B4">Chalkiadakis et al., 2024</xref>; <xref ref-type="bibr" rid="B48">Sonawane et al., 2024</xref>; <xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>; <xref ref-type="bibr" rid="B37">Piki et al., 2022</xref>). At the same time, the framework represents a conceptual consolidation that situates these empirical insights within broader theoretical traditions, including UDL, the CAMIL, socio-constructivism, and agentic presence models (<xref ref-type="bibr" rid="B28">Mohamed et al., 2025</xref>; <xref ref-type="bibr" rid="B57">Vygotsky, 1978</xref>; <xref ref-type="bibr" rid="B48">Sonawane et al., 2024</xref>). In this sense, the framework functions both as an evidence-informed explanatory model and as a theory-building contribution that articulates how immersive equity is produced through the interaction of intelligence, embodiment, emotion, and governance.</p>
</sec>
<sec>
<label>4.5</label>
<title>Synthesis: Toward ethical, equitable, and embodied inclusion</title>
<p>In synthesis, VR and Agentic AI appear increasingly positioned as inclusion mediators that shape how learning is designed, delivered, and experienced. However, technological sophistication does not automatically translate into educational equity. Persistent disparities in infrastructure, pedagogy, and ethical governance mean that immersive inclusion can support equity or inadvertently reproduce exclusion. Consequently, equitable scaling depends on a tighter alignment of pedagogy, readiness, and governance alongside inclusive, design-based research approaches that embed accessibility and equity from conception through implementation. Furthermore, frameworks such as Meta-MILE and EIPA require broader empirical testing across diverse geographic contexts to establish robustness beyond resource-rich contexts. Ultimately, it is through the deliberate co-location of technology and ethics, immersion and empathy, that VR&#x02013;AI can more credibly fulfill the promise of immersive equity in which learners, regardless of difference, can meaningfully participate, persist, and flourish within digitally mediated classrooms.</p>
<p>Accordingly, the Integrated VR&#x02013;AI Equity Framework proposed in this review should be understood as an inductive synthesis grounded in recurring cross-study patterns, while also capturing the direction of travel toward stronger forms of agentic co-agency suggested by the most advanced implementations (<xref ref-type="bibr" rid="B59">Yeganeh et al., 2025</xref>; <xref ref-type="bibr" rid="B51">Strielkowski et al., 2025</xref>; <xref ref-type="bibr" rid="B48">Sonawane et al., 2024</xref>; <xref ref-type="bibr" rid="B56">Tracy and Spantidi, 2024</xref>). The framework does not assume that full agentic maturity is present across all current systems; rather, it integrates both present adaptive practices and emerging agentic capacities into a coherent model aligned with inclusive design logics, particularly UDL&#x00027;s emphasis on flexible access and participation (<xref ref-type="bibr" rid="B28">Mohamed et al., 2025</xref>; <xref ref-type="bibr" rid="B29">Mokmin and Ridzuan, 2023</xref>).</p>
<p>In sum, the hybrid of AI and VR has shown promise as a path toward inclusive education. However, its advantages would need to be supported by design consistency, contextual appropriateness, and the presence of ethical safeguards, among others. The integrated nature of immersive experience, smart adaptability, and accessibility could either support equity or, even worse, unwittingly undermine it. The knowledge the study provides calls for the creation of a holistic framework that will not only influence the direction of technological progress but also the workflows, policies, and future research prioritization of education.</p></sec></sec>
<sec id="s5">
<label>5</label>
<title>Integrated VR&#x02013;AI equity framework for inclusive education</title>
<p>The conceptual framework was developed through inductive thematic synthesis across the included empirical studies, followed by theoretical integration. The Integrated VR&#x02013;AI equity framework for inclusive education provides a visual illustration of the study&#x00027;s key findings, comprising all four interrelated thematic domains that together explain how immersive and intelligent technologies facilitate equitable and inclusive learning environments, as depicted in <xref ref-type="fig" rid="F5">Figure 5</xref>.</p>
<fig position="float" id="F5">
<label>Figure 5</label>
<caption><p>Integrated VR&#x02013;AI equity framework for inclusive education.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="feduc-11-1761456-g0005.tif">
<alt-text content-type="machine-generated">Diagram illustrating the integration of VR and Agentic AI. Central circle labeled &#x0201C;VR and Agentic AI&#x0201D; connects to four surrounding boxes: &#x0201C;Pedagogical Applications&#x0201D; (scaffolded social practice, goal-directed instruction, dynamic sensory supports), &#x0201C;Learning Outcomes&#x0201D; (engagement and motivation, cognitive performance, social-communication skills, motor-skill development, flexible mastery learning), &#x0201C;Ethical and Implementation Challenges&#x0201D; (access and cost barriers, teacher readiness, algorithmic bias, legal compliance), and &#x0201C;Integration Frameworks&#x0201D; (coagency, affordances, capability approach).</alt-text>
</graphic>
</fig>
<p>The Integrated VR&#x02013;AI Equity Framework is used to envisage four interconnected themes: pedagogical applications, learning outcomes, ethical and implementation challenges, and integration frameworks that enable a holistic view toward immersive inclusion. At its core, the concept of pedagogical innovation emerges at the intersection of VR&#x00027;s embodied immersion and Agentic AI&#x00027;s adaptive intelligence, leading to learner-centered, multimodal, and emotion-aware instructional design.</p>
<p>The realm of learning outcomes suggests that engagement, cognitive development, and affective safety are significantly enhanced when learners co-construct meaning with intelligent virtual agents, due to the transformative nature of co-agency in education.</p>
<p>Conversely, the ethical and practical dimensions raise several issues, including infrastructure shortcomings that affect access for all users, a lack of teacher preparedness, algorithmic discrimination, governance that requires greater transparency and scrutiny, and the need to treat accessible design as a form of equity.</p>
<p>These dimensions are theoretically underpinned by an integration framework that connects UDL, Emotion-Aware Tutoring, and Hybrid Immersive Pedagogy models into an all-encompassing structure. The framework plays a distinct role by combining pedagogy, affect, and ethics into one single, adjustable continuum, thereby linking technological progress and social inclusion. Its importance lies not only in theoretical synthesis but also in creating a practical roadmap for scaling immersive equity, one concerned not only with the efficiency of VR&#x02013;AI ecosystems but also with their grounding in factors such as human dignity, diversity, and social justice in the education sector.</p>
<p>The framework draws from several sources: CAMIL (immersion &#x0002B; emotion), UDL (accessibility and flexibility), socio-constructivism (collaborative knowledge construction), and Agentic-AI models (adaptive feedback and co-presence). By merging these normally isolated theoretical areas, the model provides a comprehensive description of how learning, emotion, embodiment, and ethics intermingle in immersive environments. Overall, the Integrated VR&#x02013;AI Equity Framework offers a model that is both theoretically grounded and practically applicable for the analysis and design of immersive learning ecosystems. Its originality lies in viewing AI not only as a cognitive scaffold but also as an affective, ethical, and participatory partner in learning, where immersive co-agency is regarded as a primary mechanism for facilitating inclusive and equitable education.</p></sec>
<sec sec-type="conclusion" id="s6">
<label>6</label>
<title>Conclusion</title>
<p>The present research examined the synergy between VR and Agentic AI in shaping future inclusive educational practices. The combined findings of sixteen studies indicate that these technologies should be introduced in ways that empower and celebrate diversity rather than merely tolerating the coexistence of differences. The immersive co-agency to which the study refers is a situation in which learners interact with intelligent, emotionally responsive systems in open-ended ways, and this interaction can enhance participation, cognitive clarity, and emotional security. However, technology is not the only factor in bringing about such a transformation; it also requires ethical foresight, inclusive design, and teacher training.</p>
<p>The Integrated VR&#x02013;AI Equity Framework should therefore be understood as a synthesis derived from empirical evidence rather than a prescriptive model detached from practice. While firmly anchored in recurring findings across the reviewed studies, it extends beyond individual implementations by integrating these findings into a unified explanatory structure. This dual grounding allows the framework to both reflect current practice and guide future design, research, and policy directions for equitable immersive education.</p>
<p>This framework, integrated into the VR&#x02013;AI system, provides a blueprint for equitable application. It aligns the input elements, namely UDL, ethics-in-use, and sustainability, with the lived experiences of learners in these environments, arguing that this alignment ultimately supports fairness building, that is, the co-agency of VR deliberately articulated toward social equity. This model promises not only to attract but also to be part of ongoing global debates on education, while attempting to establish that VR and AI are not only facilitators of innovation but also agents of justice that restore the dignity of students often disregarded by conventional teaching methods. Ultimately, the research highlights the need to transform learning environments to support inclusive education. Such environments will be smart, yet kind, accommodating, and humane. Collaboration among technology, education, policymaking, and learners is required on a systemic level to realize this vision and turn new technologies into facilitators of inclusion. The proposed model should serve as a reminder during the digital transformation process that a truly inclusive world cannot be created simply by accommodating all learners with technology; rather, an inclusive world exists when learners are seen, supported, and empowered to succeed in it.</p></sec>
<sec id="s7">
<label>7</label>
<title>Study implications</title>
<p>The integrated VR&#x02013;AI equity framework for inclusive education, along with its findings, has significant implications for the practical, policy, and theoretical dimensions. Taken together, these findings show that real educational equity through immersive and intelligent technologies will be the only viable option within a new pedagogy, governance, and research paradigm that prioritizes inclusion, ethics, and sustainability.</p>
<sec>
<label>7.1</label>
<title>Theoretical implications</title>
<p>Theoretically, this study reframes the concept of inclusion by integrating the constructivist, socio-cultural, and embodied learning paradigms with agentic intelligence theory, positioning immersive education as an interactive state of co-agency in which the learners and the AI systems jointly negotiate meaning, emotion, and control. This study extends the existing literature by presenting emotionally intelligent pedagogical agents that range from mere cognitive support to those capable of shared regulation and ethical companionship, helping with anxiety regulation, sensory load modulation, and overall psychological safety. The Integrated VR&#x02013;AI Equity Framework further contributes to the development of learning theory by showing that immersive equity emerges only when embodied presence, emotional regulation, cognitive load protection, and accessibility infrastructures work together; a convergence rarely seen in earlier frameworks such as CAMIL, UDL, socio-constructivism, and cognitive load theory, which have tended to address these elements in isolation. By foregrounding co-agency as a new pedagogical construct, the framework reconceptualizes AI not as a passive instructional tool but as an active learning partner that collaborates with learners to shape pacing, depth, and affective support. Additionally, the framework positions emotional regulation as a core mechanism of knowledge construction in immersive environments, making affect central rather than peripheral to inclusive pedagogy. Collectively, these insights transform educational technology from a tool of efficiency into a tool of empowerment, insisting that theoretical conversations about immersion must be simultaneously pedagogical, ethical, and social, and aligned with a broader pursuit of educational justice.</p>
</sec>
<sec>
<label>7.2</label>
<title>Practical implications</title>
<sec>
<label>7.2.1</label>
<title>Pedagogy</title>
<p>The proposed framework advances an inclusive pedagogical shift away from one-size-fits-all instruction toward adaptive, emotion-aware, and multisensory learning. It reconceptualizes the role of educators from passive transmitters of content to co-agency facilitators, collaborating with agentic AI systems to personalize the pace, modality, and feedback in real time. This pedagogical orientation aligns with UDL principles, recognizing that cognitive diversity, emotional regulation needs, and sensory preferences must be actively supported throughout the learning process, particularly for neurodivergent learners.</p></sec>
<sec>
<label>7.2.2</label>
<title>Design</title>
<p>From a design perspective, the framework prioritizes adaptive intelligence over static immersion, reflecting evidence from the reviewed studies that VR alone yields limited or inconsistent learning benefits without intelligent mediation. Accordingly, the framework embeds safety-by-design and accessibility-by-design principles at the architectural level, ensuring configurable sensory settings, flexible interaction modes, and emotionally responsive feedback. By addressing inclusion at the design stage rather than retrofitting solutions post-deployment, the framework seeks to prevent the reproduction of exclusion and discrimination within immersive environments.</p></sec>
<sec>
<label>7.2.3</label>
<title>Implementation</title>
<p>Successful implementation is contingent on teacher facilitation and institutional support. The framework emphasizes that investment in professional development determines whether immersive innovations translate into equitable learning outcomes. Well-prepared educators are better positioned to interpret AI-generated feedback, orchestrate virtual experiences, and support learners&#x00027; emotional regulation. Consequently, inclusion extends beyond technical functionality into everyday classroom practice, where trained teachers act as critical mediators between immersive technology and meaningful, equitable learning experiences.</p>
</sec>
</sec>
<sec>
<label>7.3</label>
<title>Policy and system level implications</title>
<p>The framework is a policy-oriented call for national and institutional governance to regulate immersive VR&#x02013;AI technologies in education in an ethical, equitable, and sustainable manner. The protection of emotional, biometric, and behavioral data is central to such governance, which necessitates establishing transparent standards for data collection, imposing restrictions on the misuse of surveillance, and conducting mandatory fairness audits to ensure that decisions made by intelligent agents are not biased. Furthermore, the framework argues that the teaching of immersive ethics and accessibility must be incorporated into teacher-training curricula and accreditation programs, rather than treated as optional add-ons, to ensure that teachers are equipped to address the complex responsibilities of emotion-sensitive and adaptive AI. Governments, NGOs, and donors must prioritize funding for low-cost VR devices, bandwidth optimization, and context-appropriate AI models. At the same time, current access is heavily tilted toward affluent regions and private institutions. This way, schools with limited resources can still participate at an affordable cost, such as through smartphone headsets or existing collaborative metaverse platforms. In addition, institutions should create strategic digital-transformation plans that align with budgeting for sustainable hardware, cross-departmental collaboration as the norm, and the embedding of ethical procurement standards. Ultimately, the framework shifts the narrative on immersive equity from a technological luxury to a basic educational right, compelling the policy landscape to change to ensure safe, accessible, and just participation for all learners, especially those most at risk of exclusion.</p></sec></sec>
<sec id="s8">
<label>8</label>
<title>Limitations and future research</title>
<p>Overall, the review presented is solid and includes a conceptual framework. Nevertheless, certain limitations need to be considered. Firstly, the research relied on 16 empirical articles published between 2020 and 2025. These articles varied in their methodologies but represented only a small fraction of the global research on inclusive education and immersive, intelligent technologies. Therefore, the conclusions of the study might be biased due to the region or context, which is likely true since most of the research comes from the Global North, that is, the USA, China, and Europe. This geographical imbalance affects the research findings, either through a lack of application or by the limited area to which they can be generalized, especially in Sub-Saharan Africa and South Asia, where infrastructural realities and policies have quite different meanings. Secondly, the review was restricted to empirical studies, thus omitting conceptual and systematic reviews, which could have provided a better understanding and contributed to theoretical development or the long-term evolution. This restriction strengthens the empirical grounding of the review but narrows the potential theoretical synthesis. Thirdly, the variety in research designs, sample sizes, and study populations included created challenges in drawing general conclusions from the findings. One group of studies included small-scale pilot interventions in controlled settings, and, thus, it is likely that the effects of VR&#x02013;AI interventions, which are usually diminished over time, were amplified by the initial novelty. Another group had no long-term data and therefore could not draw any meaningful conclusions about the extent to which learning outcomes or behavioral changes persisted over the intervention period. Finally, accessibility and ethical evaluations were reported only occasionally, drawing attention to the persistent disparity between technological aspirations and empirical verification.</p>
<p>Therefore, future investigations should adopt the longitudinal, cross-contextual, and interdisciplinary paradigms to study the dynamics of immersive co-agency through time, cultures, and learner groups. Moreover, there is an obvious demand for participatory research that actively involves the voices of disabled students and their teachers so that future tech innovations are not made for inclusion but rather are developed with and by the very people they serve. In this vein, future research should consider low-cost, scalable configurations, such as mobile or augmented reality hybrids, to determine how an inclusive VR&#x02013;AI can flourish in a resource-constrained environment. Finally, ethnographic and neuro-cognitive approaches could be very insightful for understanding how students emotionally and cognitively occupy immersive spaces, thereby anchoring inclusion in lived experiences rather than mere theoretical aspirations.</p></sec>
</body>
<back>
<sec sec-type="data-availability" id="s9">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="supplementary-material" rid="SM1">Supplementary material</xref>, further inquiries can be directed to the corresponding author/s.</p>
</sec>
<sec sec-type="ethics-statement" id="s10">
<title>Ethics statement</title>
<p>The study was approved by the University of South Africa, College of Science, Engineering and Technology School of Computing ERC; Ref &#x00023;: 9813. The studies were conducted in accordance with the local legislation and institutional requirements. Written informed consent for participation was not required from the participants or the participants&#x00027; legal guardians/next of kin in accordance with the national legislation and institutional requirements.</p>
</sec>
<sec sec-type="author-contributions" id="s11">
<title>Author contributions</title>
<p>BC: Conceptualization, Methodology, Software, Validation, Writing &#x02013; review &#x00026; editing. KM: Conceptualization, Methodology, Software, Data curation, Formal analysis, Visualization, Writing &#x02013; original draft. BM: Conceptualization, Data curation, Formal analysis, Methodology, Software, Validation, Visualization, Writing &#x02013; original draft.</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s13">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was used in the creation of this manuscript. The author(s) used the artificial intelligence tools ChatGPT 5 and Grammarly AI for English language control. The fluency and spelling of the English manuscript are targeted, and there is no AI-generated content. After the language check using AI tools, the author(s) rechecked and approved the content, taking full responsibility for this publication.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec sec-type="disclaimer" id="s14">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="s15">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/feduc.2026.1761456/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/feduc.2026.1761456/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Table_1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ardai</surname> <given-names>E.</given-names></name> <name><surname>V&#x000E1;mos</surname> <given-names>T.</given-names></name> <name><surname>Papp</surname> <given-names>G.</given-names></name> <name><surname>Berencsi</surname> <given-names>A.</given-names></name></person-group> (<year>2022</year>). <article-title>Virtual reality therapy in special needs education</article-title>. <source>J. Early Years Educ.</source> <volume>10</volume>, <fpage>259</fpage>&#x02013;<lpage>271</lpage>. doi: <pub-id pub-id-type="doi">10.31074/gyntf.2022.3.259.271</pub-id></mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Beaunoyer</surname> <given-names>E.</given-names></name> <name><surname>Dup&#x000E9;r&#x000E9;</surname> <given-names>S.</given-names></name> <name><surname>Guitton</surname> <given-names>M. J.</given-names></name></person-group> (<year>2020</year>). <article-title>COVID-19 and digital inequalities: reciprocal impacts and mitigation strategies</article-title>. <source>Comput. Human Behav.</source> <volume>111</volume>:<fpage>106424</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chb.2020.106424</pub-id><pub-id pub-id-type="pmid">32398890</pub-id></mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Boot</surname> <given-names>C. W.</given-names></name> <name><surname>Hughes</surname> <given-names>F. S.</given-names></name></person-group> (<year>2025</year>). <article-title>Mathematics ahead: leveraging immersive virtual reality to create equitable and transformative learning experiences</article-title>. <source>Fut. Educ. Res.</source> <fpage>1</fpage>&#x02013;<lpage>13</lpage>. doi: <pub-id pub-id-type="doi">10.1002/fer3.70024</pub-id></mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chalkiadakis</surname> <given-names>A.</given-names></name> <name><surname>Seremetaki</surname> <given-names>A.</given-names></name> <name><surname>Kanellou</surname> <given-names>A.</given-names></name> <name><surname>Kallishi</surname> <given-names>M.</given-names></name> <name><surname>Moraitaki</surname> <given-names>M.</given-names></name> <name><surname>Morfopoulou</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Impact of artificial intelligence and virtual reality on educational inclusion: a systematic review of technologies supporting students with disabilities</article-title>. <source>Educ. Sci.</source> <volume>14</volume>, <fpage>1</fpage>&#x02013;<lpage>24</lpage>. doi: <pub-id pub-id-type="doi">10.3390/educsci14111223</pub-id></mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cook</surname> <given-names>A.</given-names></name></person-group> (<year>2024</year>). <article-title>Conceptualisations of neurodiversity and barriers to inclusive pedagogy in schools: a perspective article</article-title>. <source>J. Res. Spec. Educ. Needs</source> <volume>24</volume>, <fpage>627</fpage>&#x02013;<lpage>636</lpage>. doi: <pub-id pub-id-type="doi">10.1111/1471-3802.12656</pub-id></mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cooke</surname> <given-names>A.</given-names></name> <name><surname>Smith</surname> <given-names>D.</given-names></name> <name><surname>Booth</surname> <given-names>A.</given-names></name></person-group> (<year>2012</year>). <article-title>Beyond PICO: the SPIDER tool for qualitative evidence synthesis</article-title>. <source>Qual. Health Res.</source> <volume>22</volume>, <fpage>1435</fpage>&#x02013;<lpage>1443</lpage>. doi: <pub-id pub-id-type="doi">10.1177/1049732312452938</pub-id><pub-id pub-id-type="pmid">22829486</pub-id></mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Cote</surname> <given-names>A.</given-names></name> <name><surname>Banks</surname> <given-names>M.</given-names></name></person-group> (<year>2025</year>). <source>Global Disability Inclusion Report</source>. <publisher-loc>Berlin</publisher-loc>.</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Creed</surname> <given-names>C.</given-names></name> <name><surname>Al-Kalbani</surname> <given-names>M.</given-names></name> <name><surname>Theil</surname> <given-names>A.</given-names></name> <name><surname>Sarcar</surname> <given-names>S.</given-names></name> <name><surname>Williams</surname> <given-names>I.</given-names></name></person-group> (<year>2024</year>). <article-title>Inclusive AR/VR: accessibility barriers for immersive technologies</article-title>. <source>Univers. Access Inf. Soc.</source> <volume>23</volume>, <fpage>59</fpage>&#x02013;<lpage>73</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10209-023-00969-0</pub-id></mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Halkiopoulos</surname> <given-names>C.</given-names></name> <name><surname>Gkintoni</surname> <given-names>E.</given-names></name></person-group> (<year>2024</year>). <article-title>Leveraging AI in E-learning: personalized learning and adaptive assessment through cognitive neuropsychology&#x02014;a systematic analysis</article-title>. <source>Electronics</source> <volume>13</volume>:<fpage>3762</fpage>. doi: <pub-id pub-id-type="doi">10.3390/electronics13183762</pub-id></mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hamed</surname> <given-names>R.</given-names></name> <name><surname>Gdanski</surname> <given-names>E.</given-names></name> <name><surname>Kim</surname> <given-names>J.</given-names></name> <name><surname>Le</surname> <given-names>J.</given-names></name> <name><surname>Lopez</surname> <given-names>A.</given-names></name> <name><surname>Panjwani</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>The use of virtual reality for student training on bias and microaggressions</article-title>. <source>J. Occup. Ther. Educ.</source> <volume>8</volume>:<fpage>106</fpage>. doi: <pub-id pub-id-type="doi">10.26681/jote.2024.080106</pub-id></mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hong</surname> <given-names>Q. N.</given-names></name> <name><surname>Bartlett</surname> <given-names>G.</given-names></name> <name><surname>Vedel</surname> <given-names>I.</given-names></name> <name><surname>Pluye</surname> <given-names>P.</given-names></name> <name><surname>F&#x000E0;bregues</surname> <given-names>S.</given-names></name> <name><surname>Boardman</surname> <given-names>F.</given-names></name> <etal/></person-group>. (<year>2018a</year>). <article-title>The mixed methods appraisal tool (MMAT) version 2018 for information professionals and researchers</article-title>. <source>Educ. Inf.</source> <volume>34</volume>, <fpage>285</fpage>&#x02013;<lpage>291</lpage>. doi: <pub-id pub-id-type="doi">10.3233/EFI-180221</pub-id></mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hong</surname> <given-names>Q. N.</given-names></name> <name><surname>Gonzalez-Reyes</surname> <given-names>A.</given-names></name> <name><surname>Pluye</surname> <given-names>P.</given-names></name></person-group> (<year>2018b</year>). <article-title>Improving the usefulness of a tool for appraising the quality of qualitative, quantitative and mixed methods studies, the mixed methods appraisal tool (MMAT)</article-title>. <source>J. Eval. Clin. Pract.</source> <volume>24</volume>, <fpage>459</fpage>&#x02013;<lpage>467</lpage>. doi: <pub-id pub-id-type="doi">10.1111/jep.12884</pub-id><pub-id pub-id-type="pmid">29464873</pub-id></mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hutson</surname> <given-names>J.</given-names></name> <name><surname>McGinley</surname> <given-names>C.</given-names></name></person-group> (<year>2023</year>). <article-title>Neuroaffirmative approaches to extended reality: empowering individuals with autism spectrum condition through immersive learning environments</article-title>. <source>Int. J. Technol. Educ. Sci.</source> <volume>7</volume>, <fpage>400</fpage>&#x02013;<lpage>414</lpage>. doi: <pub-id pub-id-type="doi">10.46328/ijtes.499</pub-id></mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ionit&#x00103;</surname> <given-names>A. R.</given-names></name> <name><surname>Anghel</surname> <given-names>D. C.</given-names></name> <name><surname>Boudouh</surname> <given-names>T.</given-names></name></person-group> (<year>2025</year>). <article-title>Mind, machine, and meaning: cognitive ergonomics and adaptive interfaces in the age of industry 5.0</article-title>. <source>Appl. Sci.</source> <volume>15</volume>, <fpage>1</fpage>&#x02013;<lpage>37</lpage>. doi: <pub-id pub-id-type="doi">10.3390/app15147703</pub-id></mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Jia</surname> <given-names>Y.</given-names></name> <name><surname>Sin</surname> <given-names>Z. P. T.</given-names></name> <name><surname>Wang</surname> <given-names>X. E.</given-names></name> <name><surname>Li</surname> <given-names>C.</given-names></name> <name><surname>Ng</surname> <given-names>P. H. F.</given-names></name> <name><surname>Huang</surname> <given-names>X.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>&#x0201C;NivTA: towards a naturally interactable edu-metaverse teaching assistant for CAVE,&#x0201D;</article-title> in <source>Proceedings of the 2024 IEEE International Conference on Metaverse Computing, Networking, and Applications (MetaCom)</source> (<publisher-loc>Hong Kong</publisher-loc>), <fpage>57</fpage>&#x02013;<lpage>64</lpage>. doi: <pub-id pub-id-type="doi">10.1109/MetaCom62920.2024.00023</pub-id></mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Klimanov</surname> <given-names>D.</given-names></name> <name><surname>Tretyak</surname> <given-names>O.</given-names></name></person-group> (<year>2024</year>). <article-title>Pandemic-driven business model sustainability: a review</article-title>. <source>J. Bus. Ind. Mark.</source> <volume>39</volume>, <fpage>1645</fpage>&#x02013;<lpage>1668</lpage>. doi: <pub-id pub-id-type="doi">10.1108/JBIM-11-2022-0492</pub-id></mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>LaFountain</surname> <given-names>V.</given-names></name></person-group> (<year>2025</year>). <source>An Analysis of the Effects of Teaching Social-Emotional Learning Skills on Special Education Needs (SEN) Students&#x00027; Executive Functioning and Cognitive Skills</source>. <publisher-loc>San Diego, CA</publisher-loc>: <publisher-name>National University</publisher-name>.</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Liao</surname> <given-names>A. Y. H.</given-names></name> <name><surname>Huang</surname> <given-names>S. P.</given-names></name> <name><surname>Ikezawa</surname> <given-names>T.</given-names></name> <name><surname>Lin</surname> <given-names>K. Y.</given-names></name></person-group> (<year>2024</year>). <source>An Experiential Learning Platform Adopting PBL and Mix-Reality for Artificial Intelligence Literacy Education, Vol. 214</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer Nature Switzerland</publisher-name>. doi: <pub-id pub-id-type="doi">10.1007/978-3-031-64766-6_33</pub-id></mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>J.</given-names></name> <name><surname>Zhuo</surname> <given-names>S.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Dilon</surname> <given-names>A.</given-names></name> <name><surname>Howell</surname> <given-names>N.</given-names></name> <name><surname>Smith</surname> <given-names>A. D. R.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>From regulation to support: centering humans in technology-mediated emotion intervention in care contexts</article-title>. <source>Proc. ACM Hum. Comput. Interact.</source> <volume>9</volume>. doi: <pub-id pub-id-type="doi">10.1145/3757605</pub-id></mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lorenzo Lled&#x000F3;</surname> <given-names>G.</given-names></name></person-group> (<year>2025</year>). <article-title>Improving play skills in autistic students through the use of virtual reality</article-title>. <source>Digit. Educ. Rev.</source> <volume>47</volume>, <fpage>300</fpage>&#x02013;<lpage>315</lpage>. doi: <pub-id pub-id-type="doi">10.1344/der.2025.47.300-315</pub-id></mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lorenzo-Lled&#x000F3;</surname> <given-names>A.</given-names></name> <name><surname>Lorenzo</surname> <given-names>G.</given-names></name> <name><surname>Lled&#x000F3;</surname> <given-names>A.</given-names></name> <name><surname>P&#x000E9;rez</surname> <given-names>E.</given-names></name></person-group> (<year>2023</year>). <article-title>Inclusive education at university: a scientific mapping analysis</article-title>. <source>Qual. Quant.</source> <volume>58</volume>, <fpage>1</fpage>&#x02013;<lpage>25</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11135-023-01712-w</pub-id></mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lowell</surname> <given-names>V. L.</given-names></name> <name><surname>Yan</surname> <given-names>W.</given-names></name></person-group> (<year>2024</year>). <article-title>Applying systems thinking for designing immersive virtual reality learning experiences in education</article-title>. <source>TechTrends</source> <volume>68</volume>, <fpage>149</fpage>&#x02013;<lpage>160</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11528-023-00922-1</pub-id></mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Maguraushe</surname> <given-names>K.</given-names></name> <name><surname>Masimba</surname> <given-names>F.</given-names></name> <name><surname>Chimbo</surname> <given-names>B.</given-names></name></person-group> (<year>2025</year>). <article-title>Advancing inclusive educational VR: a bibliometric study of interface design</article-title>. <source>J. Inf. Syst. Inf.</source> <volume>7</volume>, <fpage>2978</fpage>&#x02013;<lpage>3004</lpage>. doi: <pub-id pub-id-type="doi">10.51519/journalisi.v7i3.1271</pub-id></mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Makransky</surname> <given-names>G.</given-names></name> <name><surname>Petersen</surname> <given-names>G. B.</given-names></name></person-group> (<year>2021</year>). <article-title>The cognitive affective model of immersive learning (CAMIL): a theoretical research-based model of learning in immersive virtual reality</article-title>. <source>Educ. Psychol. Rev.</source> <volume>33</volume>, <fpage>937</fpage>&#x02013;<lpage>958</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10648-020-09586-2</pub-id></mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>M&#x000E4;ntyoja</surname> <given-names>M.</given-names></name> <name><surname>Hautala</surname> <given-names>J.</given-names></name></person-group> (<year>2025</year>). <article-title>Possibilities and limitations of learning in Zoom, on campus, and in VR&#x02013;students&#x00027; experiences of hybrid learning spaces</article-title>. <source>J. Geogr. High. Educ.</source> <volume>49</volume>, <fpage>513</fpage>&#x02013;<lpage>531</lpage>. doi: <pub-id pub-id-type="doi">10.1080/03098265.2025.2521264</pub-id></mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Mapfumo</surname> <given-names>K.</given-names></name> <name><surname>Masuka</surname> <given-names>L.</given-names></name> <name><surname>Ncube</surname> <given-names>E. R.</given-names></name> <name><surname>Ndlovu</surname> <given-names>B.</given-names></name></person-group> (<year>2024</year>). <article-title>&#x0201C;<italic>Exploring the factors influencing the adoption of virtual reality and augmented reality in</italic> education,&#x0201D;</article-title> in <source>7th European Conference on Industrial Engineering and Operations Management</source> (<publisher-loc>Augsburg</publisher-loc>). doi: <pub-id pub-id-type="doi">10.46254/EU07.20240166</pub-id></mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Maroju</surname> <given-names>P. K.</given-names></name> <name><surname>Bhattacharya</surname> <given-names>P.</given-names></name></person-group> (<year>2025</year>). <article-title>&#x0201C;Understanding emotional intelligence: the heart of human-centered technology,&#x0201D;</article-title> in <source>Humanizing Technology With Emotional Intelligence</source> (<publisher-loc>Hershey, PA</publisher-loc>: <publisher-name>IGI Global Scientific Publishing</publisher-name>), <fpage>1</fpage>&#x02013;<lpage>18</lpage>. doi: <pub-id pub-id-type="doi">10.4018/979-8-3693-7011-7.ch001</pub-id></mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mohamed</surname> <given-names>A.</given-names></name> <name><surname>Faisal</surname> <given-names>R.</given-names></name> <name><surname>Al-Gindy</surname> <given-names>A.</given-names></name> <name><surname>Shaalan</surname> <given-names>K.</given-names></name></person-group> (<year>2025</year>). <article-title>Artificial intelligence and immersive technologies: virtual assistants in AR/VR for special needs learners&#x02020;</article-title>. <source>Computers</source> <volume>14</volume>, <fpage>1</fpage>&#x02013;<lpage>24</lpage>. doi: <pub-id pub-id-type="doi">10.3390/computers14080306</pub-id></mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="web"><person-group person-group-type="author"><name><surname>Mokmin</surname> <given-names>N. A. M.</given-names></name> <name><surname>Ridzuan</surname> <given-names>N. N. I.</given-names></name></person-group> (<year>2023</year>). <article-title>&#x0201C;Virtual reality trainers for students with disability: analysis of students&#x00027; motivation and motor performance,&#x0201D;</article-title> in <source>International Conference on Research in Education and Science</source> (<publisher-loc>Cappadocia</publisher-loc>), <fpage>39</fpage>&#x02013;<lpage>55</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="http://www.istes.org">www.istes.org</ext-link> <ext-link ext-link-type="uri" xlink:href="https://orcid.org/0009-0006-7255-7862">https://orcid.org/0009-0006-7255-7862</ext-link>.</mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ndlovu</surname> <given-names>B.</given-names></name> <name><surname>Maguraushe</surname> <given-names>K.</given-names></name></person-group> (<year>2025</year>). <article-title>Balancing ethics and privacy in the use of artificial intelligence in institutions of higher learning: a framework for responsive AI systems</article-title>. <source>IJIE (Indonesian J. Informatics Educ.)</source> <volume>9</volume>:<fpage>39</fpage>. doi: <pub-id pub-id-type="doi">10.20961/ijie.v9i1.100723</pub-id></mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Ndlovu</surname> <given-names>B. M.</given-names></name> <name><surname>Maphosa</surname> <given-names>N.</given-names></name> <name><surname>Dube</surname> <given-names>S.</given-names></name></person-group> (<year>2023</year>). <article-title>&#x0201C;Virtual reality (VR) simulation of chemistry lab using blender and unity,&#x0201D;</article-title> in <source>Official Conference Proceedings of the IAFOR Conference on Educational Research &#x00026; Innovation 2023</source> (<publisher-loc>Nagoya</publisher-loc>), <fpage>101</fpage>&#x02013;<lpage>109</lpage>.</mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>O&#x00027;Connor</surname> <given-names>U.</given-names></name> <name><surname>Courtney</surname> <given-names>C.</given-names></name> <name><surname>Mulhall</surname> <given-names>P.</given-names></name> <name><surname>Taggart</surname> <given-names>L.</given-names></name></person-group> (<year>2023</year>). <article-title>The prevalence of special educational needs in Northern Ireland: a comparative analysis</article-title>. <source>Eur. J. Spec. Needs Educ.</source> <volume>38</volume>, <fpage>543</fpage>&#x02013;<lpage>557</lpage>. doi: <pub-id pub-id-type="doi">10.1080/08856257.2022.2127082</pub-id></mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Owen</surname> <given-names>S.</given-names></name> <name><surname>Evans</surname> <given-names>R.</given-names></name> <name><surname>Wolfe</surname> <given-names>C.</given-names></name> <name><surname>Evans</surname> <given-names>L.</given-names></name> <name><surname>Pugsley</surname> <given-names>R.</given-names></name></person-group> (<year>2024</year>). <article-title>Enhancing learning through immersive technologies: insights and challenges from educators in Wales</article-title>. <source>Wales J. Educ.</source> <fpage>14</fpage>&#x02013;<lpage>26</lpage>. doi: <pub-id pub-id-type="doi">10.16922/focus8</pub-id></mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Page</surname> <given-names>M. J.</given-names></name> <name><surname>Moher</surname> <given-names>D.</given-names></name> <name><surname>Bossuyt</surname> <given-names>P. M.</given-names></name> <name><surname>Boutron</surname> <given-names>I.</given-names></name> <name><surname>Hoffmann</surname> <given-names>T. C.</given-names></name> <name><surname>Mulrow</surname> <given-names>C. D.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>PRISMA 2020 explanation and elaboration: Updated guidance and exemplars for reporting systematic reviews</article-title>. <source>BMJ</source> <volume>372</volume>:<fpage>n160</fpage>. doi: <pub-id pub-id-type="doi">10.1136/bmj.n160</pub-id><pub-id pub-id-type="pmid">33781993</pub-id></mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Parong</surname> <given-names>J.</given-names></name> <name><surname>Mayer</surname> <given-names>R. E.</given-names></name></person-group> (<year>2021</year>). <article-title>Cognitive and affective processes for learning science in immersive virtual reality</article-title>. <source>J. Comput. Assist. Learn.</source> <volume>37</volume>, <fpage>226</fpage>&#x02013;<lpage>241</lpage>. doi: <pub-id pub-id-type="doi">10.1111/jcal.12482</pub-id></mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Petersen</surname> <given-names>G. B.</given-names></name> <name><surname>Petkakis</surname> <given-names>G.</given-names></name> <name><surname>Makransky</surname> <given-names>G.</given-names></name></person-group> (<year>2022</year>). <article-title>A study of how immersion and interactivity drive VR learning</article-title>. <source>Comput. Educ.</source> <volume>179</volume>:<fpage>104429</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compedu.2021.104429</pub-id></mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Piki</surname> <given-names>A.</given-names></name> <name><surname>Nisiotis</surname> <given-names>L.</given-names></name> <name><surname>Alboul</surname> <given-names>L.</given-names></name></person-group> (<year>2022</year>). <article-title>&#x0201C;A preliminary exploration of the learning and engagement potential of an intelligent virtual environment,&#x0201D;</article-title> in <source>Proceedings of the 2022 IEEE 2nd International Conference on Intelligent Reality (ICIR)</source> (<publisher-loc>Piscataway, NJ</publisher-loc>), <fpage>27</fpage>&#x02013;<lpage>30</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICIR55739.2022.00021</pub-id></mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Prada</surname> <given-names>M. A.</given-names></name> <name><surname>Fuertes</surname> <given-names>J. J.</given-names></name> <name><surname>Rodr&#x000ED;guez-Ossorio</surname> <given-names>J. R.</given-names></name> <name><surname>Gonz&#x000E1;lez-Herb&#x000F3;n</surname> <given-names>R.</given-names></name> <name><surname>Gonz&#x000E1;lez-Mateos</surname> <given-names>G.</given-names></name> <name><surname>Dom&#x000ED;nguez</surname> <given-names>M.</given-names></name></person-group> (<year>2023</year>). <article-title>Hands-on training in industrial cybersecurity for a multidisciplinary Master&#x00027;s degree</article-title>. <source>IFAC-PapersOnLine</source> <volume>56</volume>, <fpage>11217</fpage>&#x02013;<lpage>11222</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ifacol.2023.10.850</pub-id></mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Quirke</surname> <given-names>M.</given-names></name> <name><surname>Galvin</surname> <given-names>T.</given-names></name></person-group> (<year>2025</year>). <article-title>Universal design for learning and the sustainable development goals: reimagining inclusive education&#x02014;an Alice in wonderland journey</article-title>. <source>Eur. J. Incl. Educ.</source> <volume>4</volume>:<fpage>150454</fpage>. doi: <pub-id pub-id-type="doi">10.7146/ejie.v4i1.150454</pub-id></mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Register</surname> <given-names>Y.</given-names></name></person-group> (<year>2024</year>). <source>The Future of AI Can Be Kind: Strategies for Embedded Ethics in AI Education</source>. <publisher-loc>Washington, DC</publisher-loc>: <publisher-name>University of Washington</publisher-name>.</mixed-citation>
</ref>
<ref id="B41">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Roberts</surname> <given-names>D.</given-names></name></person-group> (<year>2022</year>). <article-title>Multimedia learning methods and affective, behavioural and cognitive engagement: a universal approach to dyslexia?</article-title> <source>J. Furth. High. Educ.</source> <volume>46</volume>, <fpage>62</fpage>&#x02013;<lpage>75</lpage>. doi: <pub-id pub-id-type="doi">10.1080/0309877X.2021.1879746</pub-id></mixed-citation>
</ref>
<ref id="B42">
<mixed-citation publication-type="web"><person-group person-group-type="author"><name><surname>Schuster</surname> <given-names>J.</given-names></name></person-group> (<year>2022</year>). <source>The Role of International Organisations and Non-State Actors in the Implementation of the UN Convention on the Rights of Persons with Disabilities and Inclusive Education</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://refubium.fu-berlin.de/handle/fub188/33629">https://refubium.fu-berlin.de/handle/fub188/33629</ext-link>. (Accessed October 22, 2025).</mixed-citation>
</ref>
<ref id="B43">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Selwyn</surname> <given-names>N.</given-names></name></person-group> (<year>2022</year>). <article-title>The future of AI and education: some cautionary notes</article-title>. <source>Eur. J. Educ.</source> <volume>57</volume>, <fpage>620</fpage>&#x02013;<lpage>631</lpage>. doi: <pub-id pub-id-type="doi">10.1111/ejed.12532</pub-id></mixed-citation>
</ref>
<ref id="B44">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shu</surname> <given-names>X.</given-names></name> <name><surname>Gu</surname> <given-names>X.</given-names></name></person-group> (<year>2023</year>). <article-title>An empirical study of a smart education model enabled by the edu-metaverse to enhance better learning outcomes for students</article-title>. <source>Systems</source> <volume>11</volume>:<fpage>75</fpage>. doi: <pub-id pub-id-type="doi">10.3390/systems11020075</pub-id></mixed-citation>
</ref>
<ref id="B45">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Siddiqi</surname> <given-names>M. M.</given-names></name></person-group> (<year>2024</year>). <article-title>Future of digital education: inclusive, immersive, equitable</article-title>. <source>Mediasp. DME Media J. Commun.</source> <volume>5</volume>, <fpage>8</fpage>&#x02013;<lpage>24</lpage>. doi: <pub-id pub-id-type="doi">10.53361/dmejc.v5i01.02</pub-id></mixed-citation>
</ref>
<ref id="B46">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>T.</given-names></name> <name><surname>Dutta</surname> <given-names>S.</given-names></name> <name><surname>Vyas</surname> <given-names>S.</given-names></name> <name><surname>Rocha</surname> <given-names>&#x000C1;.</given-names></name></person-group> (<year>2024</year>). <source>Explainable AI for Education: Recent Trends and Challenges, Vol. 19</source> (<publisher-loc>Berlin</publisher-loc>: <publisher-name>Springer Nature</publisher-name>). doi: <pub-id pub-id-type="doi">10.1007/978-3-031-72410-7</pub-id></mixed-citation>
</ref>
<ref id="B47">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Smith</surname> <given-names>S. J.</given-names></name> <name><surname>Rowland</surname> <given-names>A.</given-names></name> <name><surname>Goldman</surname> <given-names>S.</given-names></name> <name><surname>Carreon</surname> <given-names>A.</given-names></name></person-group> (<year>2024</year>). <article-title>A guide for special education leaders to utilize artificial intelligence: students&#x00027; perspectives for future consideration</article-title>. <source>J. Spec. Educ. Leadersh.</source> <volume>37</volume>, <fpage>77</fpage>&#x02013;<lpage>92</lpage>.</mixed-citation>
</ref>
<ref id="B48">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Sonawane</surname> <given-names>P.</given-names></name> <name><surname>Ramteke</surname> <given-names>S.</given-names></name> <name><surname>Kulkarni</surname> <given-names>V.</given-names></name> <name><surname>Pawar</surname> <given-names>U.</given-names></name></person-group> (<year>2024</year>). <article-title>&#x0201C;Introducing interactive elements to mobile virtual reality: leveraging smartphone sensors for immersive experiences,&#x0201D;</article-title> in <source>2024 8th International Conference on Computing, Communication, Control and Automation (ICCUBEA 2024)</source> (<publisher-loc>Pune</publisher-loc>), <fpage>1</fpage>&#x02013;<lpage>6</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICCUBEA61740.2024.10774852</pub-id></mixed-citation>
</ref>
<ref id="B49">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Song</surname> <given-names>Y.</given-names></name> <name><surname>Cao</surname> <given-names>J.</given-names></name> <name><surname>Wu</surname> <given-names>K.</given-names></name> <name><surname>Yu</surname> <given-names>P. L. H.</given-names></name> <name><surname>Lee</surname> <given-names>J. C. K.</given-names></name></person-group> (<year>2023</year>). <article-title>Developing &#x02018;learningverse&#x02019;&#x02014;A 3-D metaverse platform to support teaching, social, and cognitive presences</article-title>. <source>IEEE Trans. Learn. Technol.</source> <volume>16</volume>, <fpage>1165</fpage>&#x02013;<lpage>1178</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TLT.2023.3276574</pub-id></mixed-citation>
</ref>
<ref id="B50">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sripan</surname> <given-names>T.</given-names></name> <name><surname>Jeerapattanatorn</surname> <given-names>P.</given-names></name></person-group> (<year>2025</year>). <article-title>Metaverse-based learning: a comprehensive review of current trends, challenges, and future implications</article-title>. <source>Contemp. Educ. Technol.</source> <volume>17</volume>:<fpage>ep584</fpage>. doi: <pub-id pub-id-type="doi">10.30935/cedtech/16434</pub-id></mixed-citation>
</ref>
<ref id="B51">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Strielkowski</surname> <given-names>W.</given-names></name> <name><surname>Grebennikova</surname> <given-names>V.</given-names></name> <name><surname>Lisovskiy</surname> <given-names>A.</given-names></name> <name><surname>Rakhimova</surname> <given-names>G.</given-names></name> <name><surname>Vasileva</surname> <given-names>T.</given-names></name></person-group> (<year>2025</year>). <article-title>AI-driven adaptive learning for sustainable educational transformation</article-title>. <source>Sustain. Dev.</source> <volume>33</volume>, <fpage>1921</fpage>&#x02013;<lpage>1947</lpage>. doi: <pub-id pub-id-type="doi">10.1002/sd.3221</pub-id></mixed-citation>
</ref>
<ref id="B52">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Strousopoulos</surname> <given-names>P.</given-names></name> <name><surname>Troussas</surname> <given-names>C.</given-names></name> <name><surname>Krouska</surname> <given-names>A.</given-names></name> <name><surname>Sgouropoulou</surname> <given-names>C.</given-names></name></person-group> (<year>2024</year>). <source>Architecting Immersive Education: Designing an Intelligent Online Virtual University, Vol. 1170. LNNS</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer Nature Switzerland</publisher-name>. doi: <pub-id pub-id-type="doi">10.1007/978-3-031-73344-4_29</pub-id></mixed-citation>
</ref>
<ref id="B53">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Syafiq</surname> <given-names>R. F.</given-names></name> <name><surname>Hakim</surname> <given-names>H.</given-names></name></person-group> (<year>2024</year>). <article-title>Virtual reality as a tool for teaching children with special needs</article-title>. <source>Proc. Int. Conf. Inov. Sci. Technol. Educ. Children Health</source> <volume>4</volume>, <fpage>169</fpage>&#x02013;<lpage>173</lpage>. doi: <pub-id pub-id-type="doi">10.62951/icistech.v4i1.137</pub-id></mixed-citation>
</ref>
<ref id="B54">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Tanprasert</surname> <given-names>T.</given-names></name></person-group> (<year>2025</year>). <source>Agent personal design for engagement in virtual dialogic learning environments</source> (Doctoral dissertation). <publisher-name>University of British Columbia</publisher-name>.</mixed-citation>
</ref>
<ref id="B55">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Teo</surname> <given-names>S. A.</given-names></name></person-group> (<year>2023</year>). <article-title>Human dignity and AI: mapping the contours and utility of human dignity in addressing challenges presented by AI</article-title>. <source>Law Innov. Technol.</source> <volume>15</volume>:<fpage>2184132</fpage>. doi: <pub-id pub-id-type="doi">10.1080/17579961.2023.2184132</pub-id></mixed-citation>
</ref>
<ref id="B56">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tracy</surname> <given-names>K.</given-names></name> <name><surname>Spantidi</surname> <given-names>O.</given-names></name></person-group> (<year>2024</year>). <article-title>Impact of GPT-driven teaching assistants in VR learning environments</article-title>. <source>IEEE Trans. Learn. Technol.</source> <volume>18</volume>, <fpage>192</fpage>&#x02013;<lpage>205</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TLT.2025.3539179</pub-id></mixed-citation>
</ref>
<ref id="B57">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Vygotsky</surname> <given-names>L. S.</given-names></name></person-group> (<year>1978</year>). <source>Mind in Society: Development of Higher Psychological Processes</source>. <publisher-loc>Cambridge, MA</publisher-loc>: <publisher-name>Harvard University Press</publisher-name>.</mixed-citation>
</ref>
<ref id="B58">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wood</surname> <given-names>R.</given-names></name></person-group> (<year>2020</year>). <article-title>The wrong kind of noise: understanding and valuing the communication of autistic children in schools</article-title>. <source>Educ. Rev.</source> <volume>72</volume>, <fpage>111</fpage>&#x02013;<lpage>130</lpage>. doi: <pub-id pub-id-type="doi">10.1080/00131911.2018.1483895</pub-id></mixed-citation>
</ref>
<ref id="B59">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yeganeh</surname> <given-names>L. N.</given-names></name> <name><surname>Fenty</surname> <given-names>N. S.</given-names></name> <name><surname>Chen</surname> <given-names>Y.</given-names></name> <name><surname>Simpson</surname> <given-names>A.</given-names></name> <name><surname>Hatami</surname> <given-names>M.</given-names></name></person-group> (<year>2025</year>). <article-title>The future of education: a multi-layered metaverse classroom model for immersive and inclusive learning</article-title>. <source>Fut. Internet</source> <volume>17</volume>, <fpage>1</fpage>&#x02013;<lpage>55</lpage>. doi: <pub-id pub-id-type="doi">10.3390/fi17020063</pub-id></mixed-citation>
</ref>
<ref id="B60">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zahid Iqbal</surname> <given-names>M.</given-names></name> <name><surname>Campbell</surname> <given-names>A. G.</given-names></name></person-group> (<year>2023</year>). <article-title>AGILEST approach: using machine learning agents to facilitate kinesthetic learning in STEM education through real-time touchless hand interaction</article-title>. <source>Telemat. Inf. Rep.</source> <volume>9</volume>:<fpage>100034</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.teler.2022.100034</pub-id></mixed-citation>
</ref>
<ref id="B61">
<mixed-citation publication-type="web"><person-group person-group-type="author"><name><surname>Zeng</surname> <given-names>W.</given-names></name> <name><surname>Zhu</surname> <given-names>H.</given-names></name> <name><surname>Qin</surname> <given-names>C.</given-names></name> <name><surname>Wu</surname> <given-names>H.</given-names></name> <name><surname>Cheng</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2025</year>). <source>Application-Driven Value Alignment in Agentic AI Systems: Survey and Perspectives</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="http://arxiv.org/abs/2506.09656">http://arxiv.org/abs/2506.09656</ext-link> (Accessed October 19, 2025).</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2339412/overview">Israel Kibirige</ext-link>, University of Limpopo, South Africa</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3163653/overview">Saodat Gulyamova</ext-link>, Bukhara State University, Uzbekistan</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3117783/overview">Ali Geris</ext-link>, Lund University, Sweden</p>
</fn>
</fn-group>
<fn-group>
<fn fn-type="abbr" id="abbr1"><label>Abbreviations:</label><p>SDG, sustainable development goal; SEN, special educational needs; VR, virtual reality; Agentic AI, agentic artificial intelligence; UDL, universal design for learning; CLT, cognitive load theory; UNCRPD, UN convention on the rights of persons with disabilities; SLR, systematic literature review; SLD, specific learning disabilities; WoS, Web of Science; ERIC, Education Resources Information Center.</p></fn></fn-group>
</back>
</article>
