<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="review-article" dtd-version="1.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Virtual Real.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Virtual Reality</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Virtual Real.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2673-4192</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1741217</article-id>
<article-id pub-id-type="doi">10.3389/frvir.2026.1741217</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Mini Review</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Uncovering the mechanisms of virtual reality (VR)-enhanced neuroscience education: the role of eye tracking and facial expression recognition</article-title>
<alt-title alt-title-type="left-running-head">Deng</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/frvir.2026.1741217">10.3389/frvir.2026.1741217</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Deng</surname>
<given-names>Xue</given-names>
</name>
<xref ref-type="aff" rid="aff1"/>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3271395"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/Writing - review &#x26; editing/">Writing &#x2013; review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
</contrib>
</contrib-group>
<aff id="aff1">
<institution>Division of Occupational Therapy, Decker College of Nursing and Health Sciences, Binghamton University</institution>, <city>Binghamton</city>, <state>NY</state>, <country country="US">United States</country>
</aff>
<author-notes>
<corresp id="c001">
<label>&#x2a;</label>Correspondence: Xue Deng, <email xlink:href="mailto:xdeng1@binghamton.edu">xdeng1@binghamton.edu</email>
</corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-12">
<day>12</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>7</volume>
<elocation-id>1741217</elocation-id>
<history>
<date date-type="received">
<day>07</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>20</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>03</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Deng.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Deng</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-12">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Virtual reality (VR) has demonstrated substantial advantages in neuroscience education, consistently outperforming traditional instructional approaches in enhancing spatial understanding, knowledge retention, and learner engagement. However, despite this robust evidence of effectiveness, most existing studies remain primarily outcome-oriented, focusing on what learners achieve rather than on how learning processes unfold within immersive VR environments. Consequently, the cognitive and affective mechanisms that mediate VR-enhanced learning outcomes remain underexplored. This mini-review synthesizes current evidence on the integration of biometric sensing technologies&#x2014;specifically eye tracking and facial expression recognition&#x2014;in VR-based neuroscience education to elucidate the underlying cognitive and affective processes. We examine how eye tracking provides objective indicators of visual attention and cognitive load, while facial expression analysis captures affective states such as curiosity and frustration. The integration of these multimodal data streams offers a holistic framework for understanding the interplay between immersion, attention, and emotion in knowledge acquisition. Furthermore, we discuss significant technical and ethical challenges, including data synchronization, privacy, and measurement reliability. Finally, we outline future directions, emphasizing the potential for artificial intelligence (AI) to create adaptive VR learning systems that respond in real time to learner biomarkers. This approach lays the groundwork for more mechanism-informed and evidence-aligned design of adaptive XR learning environments.</p>
</abstract>
<kwd-group>
<kwd>affective learning</kwd>
<kwd>biometric sensing</kwd>
<kwd>cognitive load</kwd>
<kwd>eye tracking</kwd>
<kwd>facial expression recognition</kwd>
<kwd>immersive learning</kwd>
<kwd>neuroscience education</kwd>
<kwd>virtual reality</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was funded by the NYS/UUP Joint Labor-Management Committee Individual Development Award (IDA) Program.</funding-statement>
</funding-group>
<counts>
<fig-count count="0"/>
<table-count count="1"/>
<equation-count count="0"/>
<ref-count count="35"/>
<page-count count="6"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Virtual Reality and Human Behaviour</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>The adoption of virtual reality (VR) in higher education, particularly in medicine and neuroscience, has accelerated due to rising technological affordability and a pressing need to overcome the limitations of traditional teaching tools like textbooks and diagrams (<xref ref-type="bibr" rid="B11">Hamilton et al., 2021</xref>; <xref ref-type="bibr" rid="B9">Garc&#xed;a&#x2010;Robles et al., 2024</xref>; <xref ref-type="bibr" rid="B35">Zhao et al., 2020</xref>; <xref ref-type="bibr" rid="B21">L&#xe9;cuyer et al., 2008</xref>). Meta-analyses of randomized controlled studies confirm that immersive VR leads to moderate, significant improvements in learning outcomes compared to non-immersive methods (<xref ref-type="bibr" rid="B9">Garc&#xed;a&#x2010;Robles et al., 2024</xref>; <xref ref-type="bibr" rid="B35">Zhao et al., 2020</xref>). These findings have motivated widespread adoption of VR-based tools for teaching neuroanatomy, neurophysiology, and clinical reasoning.</p>
<p>Beyond questions of instructional effectiveness, researchers in cognitive psychology and educational technology have long emphasized the importance of cognitive and affective processes&#x2014;such as attention allocation, cognitive load, motivation, and emotional engagement&#x2014;in shaping learning outcomes. These processes have been extensively examined across a range of learning contexts, including technology-enhanced and immersive environments. However, within VR-based neuroscience education, investigations of these processes have relied predominantly on self-report measures, performance outcomes, or post hoc reflections (<xref ref-type="bibr" rid="B26">Radianti et al., 2020</xref>). As a result, there remains a limited understanding of how learners&#x2019; cognitive and affective states dynamically unfold during immersive VR learning activities, particularly when engaging with spatially complex and cognitively demanding neural content.</p>
<p>To address this gap, biometric sensing technologies offer a complementary and potentially powerful means of observing learning as it unfolds. Unlike retrospective self-reports or outcome-based assessments, biometric measures can provide time-resolved indicators of learning-related processes as they occur, thereby supporting more refined analyses of how instructional design, task complexity, and learner interaction influence engagement and understanding. Accordingly, this narrative mini-review posits that the integration of biometric sensors&#x2014;specifically eye tracking and facial expression recognition&#x2014;provides an objective, quantitative approach to investigating the attentional and affective processes that are highly relevant to learning. It synthesizes current research on these measures in VR neuroscience education, drawing together psychological, educational, and data-driven perspectives on meaningful XR learning experiences and outlining future directions for developing personalized and effective immersive learning systems.</p>
</sec>
<sec sec-type="methods" id="s2">
<label>2</label>
<title>Methodology</title>
<p>This article adopts a narrative mini-review approach to synthesize representative empirical and theoretical literature on the use of biometric sensing to investigate learning mechanisms in immersive virtual reality (VR)&#x2013;based neuroscience education. Rather than providing an exhaustive or systematic review of VR learning research, the goal is to analytically integrate key strands of work that illuminate how eye tracking and facial expression recognition can be used to examine cognitive and affective processes relevant to learning.</p>
<p>Studies were included if they involved immersive VR or related extended reality (XR) environments for education or training, incorporated eye tracking and/or facial expression analysis, and examined cognitive, attentional, or affective processes relevant to learning. Given the limited number of studies explicitly integrating immersive VR, neuroscience education, and biometric measures, relevant research from adjacent domains&#x2014;such as medical education, anatomy instruction, and simulation-based learning&#x2014;was also considered when it offered transferable methodological or theoretical insights.</p>
<p>Instead of aggregating outcomes quantitatively, this review emphasizes conceptual and methodological synthesis, focusing on how biometric measures have been interpreted, what learning-related constructs they inform, and the limitations of these inferences in immersive VR contexts.</p>
</sec>
<sec id="s3">
<label>3</label>
<title>Summary table</title>
<p>
<xref ref-type="table" rid="T1">Table 1</xref> summarizes representative empirical studies illustrating how eye tracking, pupillometry, and facial expression recognition have been applied in immersive learning contexts relevant to neuroscience education, along with the learning constructs targeted and key methodological limitations. Additional studies are discussed narratively in the synthesized findings.</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Representative studies applying eye tracking, pupillometry, and facial expression recognition in immersive learning contexts relevant to neuroscience education.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Study</th>
<th align="center">Learning context</th>
<th align="center">Biometric measure(s)</th>
<th align="center">Target construct(s)</th>
<th align="center">Key findings and limitations</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">
<xref ref-type="bibr" rid="B33">Wainman et al. (2021)</xref>
</td>
<td align="left">VR-based anatomy learning</td>
<td align="left">Eye tracking (fixations, gaze distribution)</td>
<td align="left">Visual attention; spatial processing</td>
<td align="left">Sustained fixation on task-relevant structures suggests focused attention; gaze data alone could not differentiate deep understanding from surface inspection</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B24">Parong and Mayer (2018)</xref>
</td>
<td align="left">Immersive VR science simulations</td>
<td align="left">Design principles informed by eye tracking</td>
<td align="left">Cognitive load; conceptual understanding</td>
<td align="left">Guided VR improved conceptual learning; cognitive load discussed but not directly measured via biometric indices</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B17">Kiili et al. (2014)</xref>
</td>
<td align="left">Game-based immersive learning</td>
<td align="left">Eye tracking (fixations, scan paths)</td>
<td align="left">Cognitive engagement; search strategies</td>
<td align="left">Eye-movement patterns differentiated novice and expert strategies; fixation interpretation remained task- and context-dependent</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B29">Souchet et al. (2022)</xref>
</td>
<td align="left">Learning with VR HMDs</td>
<td align="left">Eye tracking; pupillometry</td>
<td align="left">Cognitive load; visual fatigue</td>
<td align="left">Pupil dilation correlated with task difficulty and strain; lighting and display factors limited direct inference of mental effort</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B22">Lee et al. (2023)</xref>
</td>
<td align="left">VR procedural training</td>
<td align="left">Pupillometry</td>
<td align="left">Cognitive load regulation</td>
<td align="left">Pupillary responses tracked task complexity; authors caution against attributing pupil changes solely to cognitive load</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B1">Adhanom et al. (2023)</xref>
</td>
<td align="left">Review of eye tracking in VR</td>
<td align="left">Eye tracking (multiple metrics)</td>
<td align="left">Attention; user behavior</td>
<td align="left">Demonstrates feasibility of VR eye tracking while highlighting calibration, motion artifacts, and construct validity challenges</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B19">Li and Deng (2020)</xref>
</td>
<td align="left">Facial expression recognition</td>
<td align="left">Facial expression analysis</td>
<td align="left">Affective valence; arousal</td>
<td align="left">Facial analysis captures affective states but may oversimplify emotion and is sensitive to occlusion and cultural variation</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B34">Wang et al. (2022)</xref>
</td>
<td align="left">Affective computing in education</td>
<td align="left">Facial expression; multimodal sensing</td>
<td align="left">Emotion; motivation</td>
<td align="left">Shows educational promise of affective sensing while noting persistent reliability and interpretation challenges</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B12">Hern&#xe1;ndez-Mustieles et al. (2024)</xref>
</td>
<td align="left">Wearable biosensors in education</td>
<td align="left">Multimodal biometric sensing</td>
<td align="left">Cognitive&#x2013;affective dynamics</td>
<td align="left">Multimodal sensing improves interpretability but increases analytic complexity and synchronization demands in immersive settings</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s4">
<label>4</label>
<title>Synthesized findings</title>
<sec id="s4-1">
<label>4.1</label>
<title>Theoretical foundations of VR-enhanced learning</title>
<p>Across the reviewed literature, constructivism, experiential learning theory, and embodied cognition recur as key interpretive frameworks for understanding how immersive VR supports learning. These theories are commonly applied in VR learning research and offer complementary, testable expectations regarding learners&#x2019; attention, cognitive effort, and affective engagement&#x2014;processes that can be examined using biometric measures (<xref ref-type="bibr" rid="B26">Radianti et al., 2020</xref>; <xref ref-type="bibr" rid="B23">Makransky and Petersen, 2021</xref>).</p>
<p>From a constructivist perspective, learning involves active exploration and selective attention to task-relevant information. In immersive VR neuroscience education, this suggests that learners engaged in knowledge construction should exhibit goal-directed gaze behaviors, such as sustained fixations on relevant anatomical structures and systematic visual exploration, which can be examined using eye-tracking metrics (<xref ref-type="bibr" rid="B6">Duchowski, 2018</xref>; <xref ref-type="bibr" rid="B20">Lai et al., 2013</xref>).</p>
<p>Experiential learning theory emphasizes iterative cycles of concrete experience, reflection, and conceptualization. Biometrically, these phases may be associated with distinct cognitive and affective signatures, including increased pupil dilation during demanding experiential activities or transient expressions of confusion followed by engagement during reflective insight, which can be explored through pupillometry and facial expression analysis (<xref ref-type="bibr" rid="B7">D&#x2019;Mello et al., 2018</xref>; <xref ref-type="bibr" rid="B29">Souchet et al., 2022</xref>).</p>
<p>Embodied cognition highlights the role of bodily interaction in shaping cognition. In VR neuroscience education, embodied manipulation of three-dimensional neural models is expected to involve coordinated patterns of visual attention and action, measurable through eye tracking and complementary affective indicators (<xref ref-type="bibr" rid="B23">Makransky and Petersen, 2021</xref>; <xref ref-type="bibr" rid="B15">Jensen and Konradsen, 2018</xref>).</p>
<p>Together, these frameworks provide an integrated theoretical basis for linking biometric measures to learning-related processes in immersive VR environments and for informing mechanism-oriented instructional design.</p>
</sec>
<sec id="s4-2">
<label>4.2</label>
<title>VR applications as research contexts</title>
<p>VR-based neuroscience education encompasses a range of instructional applications that differ in learning objectives, interaction patterns, and cognitive&#x2013;affective demands. In this review, VR applications are organized into three representative contexts: VR-based neuroanatomy learning, neurophysiology simulation, and clinical or diagnostic training. These contexts were selected because they reflect systematically distinct instructional goals and therefore provide analytically useful contrasts for interpreting biometric measures.</p>
<p>VR-based neuroanatomy learning primarily targets spatial understanding and structural knowledge acquisition (<xref ref-type="bibr" rid="B2">Aland et al., 2023</xref>). In this context, biometric measures can be used to examine how learners allocate visual attention across anatomical structures, the extent to which gaze is directed toward task-relevant features, and how visual exploration strategies evolve with increasing familiarity (<xref ref-type="bibr" rid="B33">Wainman et al., 2021</xref>). Unlike neuroanatomy learning, which focuses on static structures, VR-based neurophysiology simulations emphasize dynamic process understanding and causal reasoning rather than static spatial relationships. Learners interact with simulations of neural signaling or circuit-level activity, often manipulating parameters to observe system behavior over time. Compared with neuroanatomy learning, this context places greater demands on temporal integration and cognitive effort, making pupillometry and affective measures particularly informative for examining fluctuations in cognitive load and emotional response during complex conceptual processing (<xref ref-type="bibr" rid="B24">Parong and Mayer, 2018</xref>). Beyond conceptual understanding, VR-based clinical and diagnostic training situates neuroscience knowledge within authentic decision-making contexts, often involving time pressure or uncertainty. This setting supports examination of visual search efficiency, attentional prioritization, and affective regulation, such as stress or confidence during task performance. Eye tracking can differentiate novice and expert gaze strategies, while affective indicators provide insight into emotional responses associated with high-stakes decisions (<xref ref-type="bibr" rid="B27">Shao et al., 2020</xref>).</p>
<p>By distinguishing VR applications according to learning goals and cognitive&#x2013;affective demands, this framework enables context-sensitive interpretation of biometric data and strengthens their analytic value in immersive neuroscience education research.</p>
</sec>
<sec id="s4-3">
<label>4.3</label>
<title>Application of eye tracking technology in VR-integrated neuroscience education</title>
<p>Eye tracking has emerged as a powerful tool for investigating visual attention and cognitive processing in immersive learning environments, offering objective insight into how learners interact with VR-based instructional content (<xref ref-type="bibr" rid="B6">Duchowski, 2018</xref>). Fixation duration is commonly interpreted as an indicator of cognitive processing depth, with longer fixations reflecting more intensive information processing (<xref ref-type="bibr" rid="B17">Kiili et al., 2014</xref>). The &#x201c;quiet eye&#x201d; phenomenon&#x2014;prolonged final fixations preceding action&#x2014;has been associated with planning efficiency and expertise (<xref ref-type="bibr" rid="B32">Vickers et al., 2000</xref>). Saccadic paths and gaze heatmaps further reveal visual search strategies and collective attention patterns that inform interface design and learning efficiency (<xref ref-type="bibr" rid="B6">Duchowski, 2018</xref>).</p>
<p>In addition, pupillometry offers a psychophysiological measure of cognitive effort that is difficult to consciously control (<xref ref-type="bibr" rid="B29">Souchet et al., 2022</xref>). Pupil dilation reliably reflects increased mental effort during complex tasks, making it a sensitive indicator of cognitive load across learning activities. When combined with fixation data, pupillometry provides a continuous, quantitative index of cognitive engagement that is less influenced by individual differences in visual scanning strategies. In immersive VR neuroscience education, these measures enable objective differentiation between on-task attention to relevant anatomical structures and off-task distraction, offering engagement metrics that extend beyond self-report methods (<xref ref-type="bibr" rid="B16">Just and Carpenter, 2013</xref>).</p>
<p>Combining traditional eye-tracking metrics with pupillometry provides a comprehensive framework for investigating learning mechanisms in immersive VR environments (<xref ref-type="bibr" rid="B18">Krejtz et al., 2018</xref>). This integrated approach can reveal how learners prioritize information and allocate cognitive resources, offering insight into how VR design influences attention, information encoding, and learning processes (<xref ref-type="bibr" rid="B22">Lee et al., 2023</xref>; <xref ref-type="bibr" rid="B31">Van Engen and McLaughlin, 2018</xref>).</p>
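<p>To make these metrics concrete, the following minimal Python sketch derives per-region dwell time and baseline-corrected pupil dilation from a simplified, synthetic gaze log. The data format, sampling rate, baseline value, and region labels are illustrative assumptions rather than the output of any particular eye-tracking system, and real analyses would additionally require fixation detection, blink handling, and luminance control.</p>
<preformat preformat-type="code"># Illustrative sketch (assumed data format): each gaze sample is
# (timestamp_s, area_of_interest_label, pupil_diameter_mm).
# Real headset SDKs differ in fields, units, and sampling rates.
samples = [
    (0.00, "hippocampus", 3.1), (0.02, "hippocampus", 3.2),
    (0.04, "hippocampus", 3.4), (0.06, "background",  3.3),
    (0.08, "thalamus",    3.6), (0.10, "thalamus",    3.8),
    (0.12, "thalamus",    3.9), (0.14, "background",  3.5),
]

BASELINE_MM = 3.0  # assumed resting pupil diameter from a calibration phase
SAMPLE_DT = 0.02   # assumed sampling interval in seconds (50 Hz)

dwell_time = {}    # total gaze time per area of interest (a fixation proxy)
dilation_sum = {}  # accumulated baseline-corrected pupil dilation per area

for _, aoi, pupil_mm in samples:
    dwell_time[aoi] = dwell_time.get(aoi, 0.0) + SAMPLE_DT
    dilation_sum[aoi] = dilation_sum.get(aoi, 0.0) + (pupil_mm - BASELINE_MM)

for aoi, dwell in dwell_time.items():
    mean_dilation = dilation_sum[aoi] / (dwell / SAMPLE_DT)
    print(f"{aoi}: dwell {dwell:.2f} s, mean dilation {mean_dilation:+.2f} mm")
</preformat>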
<p>However, eye-tracking measures do not constitute direct indicators of attention or learning. Fixation patterns, scan paths, and pupillary responses function as context-dependent signals whose interpretation depends on task structure, instructional design, and theoretical assumptions (<xref ref-type="bibr" rid="B20">Lai et al., 2013</xref>; <xref ref-type="bibr" rid="B13">Holmqvist and Andersson, 2017</xref>; <xref ref-type="bibr" rid="B14">Jarodzka et al., 2017</xref>). Gaze behavior reflects cognitive processing only when analytically situated within a specific learning context, instead of serving as a direct proxy for learning outcomes (<xref ref-type="bibr" rid="B6">Duchowski, 2018</xref>). Similarly, pupillometric responses index mental effort and processing demands but require careful experimental control to avoid conflation with learning gains (<xref ref-type="bibr" rid="B16">Just and Carpenter, 2013</xref>).</p>
</sec>
<sec id="s4-4">
<label>4.4</label>
<title>Application of facial expression technology in VR-integrated neuroscience education</title>
<p>While cognitive processes are central to learning, affective factors&#x2014;including emotions, motivation, and attitudes&#x2014;play an equally critical role in educational outcomes (<xref ref-type="bibr" rid="B34">Wang et al., 2022</xref>). Facial expression recognition is a computer vision&#x2013;based approach that objectively infers affective states by analyzing subtle facial muscle movements to estimate emotional valence and arousal (<xref ref-type="bibr" rid="B19">Li and Deng, 2020</xref>). This method captures both brief micro-expressions reflecting involuntary emotional reactions and sustained emotional trajectories that characterize learners&#x2019; ongoing experiences (<xref ref-type="bibr" rid="B8">Ekman and Friesen, 1978</xref>). Its educational value lies in revealing the dynamic interplay between emotion and cognition: positive states such as curiosity promote intrinsic motivation and deeper engagement, whereas prolonged frustration may hinder learning (<xref ref-type="bibr" rid="B10">Gruber et al., 2014</xref>). Linking affective patterns to learning outcomes is therefore essential for understanding learning as a holistic cognitive&#x2013;emotional process (<xref ref-type="bibr" rid="B1">Adhanom et al., 2023</xref>).</p>
<p>In VR-integrated neuroscience education, the complexity of neuroanatomy and neural pathways can evoke strong emotional responses. Facial expression recognition enables researchers to identify affective transitions, such as shifts from confusion to curiosity during interaction with complex 3D brain models, or sustained frustration indicative of excessive cognitive load. This real-time affective data provides nuanced insight into learners&#x2019; experiences and complements cognitive measures such as eye tracking. However, applying facial expression analysis in VR presents technical challenges. Although tools such as iMotions and OpenFace have improved feasibility, head-mounted displays partially occlude facial features, requiring advanced algorithms for accurate inference (<xref ref-type="bibr" rid="B30">Tao et al., 2005</xref>). Accordingly, facial expression data should be interpreted as probabilistic and complementary. Despite these limitations, its integration is a critical step toward adaptive and responsive immersive learning environments.</p>
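<p>As a concrete illustration of this probabilistic, complementary use, the short Python sketch below flags sustained low-valence episodes, one possible marker of frustration, in a hypothetical per-frame affect stream of the kind exported by facial-analysis toolkits. The valence cutoff, confidence threshold, and minimum episode duration are illustrative assumptions rather than validated parameters, and low-confidence frames (for example, under headset occlusion) are simply discarded.</p>
<preformat preformat-type="code"># Illustrative sketch: detect sustained low-valence (possible frustration)
# episodes in a hypothetical per-frame affect stream. Each frame is
# (timestamp_s, estimated_valence, estimator_confidence).
frames = [
    (0.0, -0.1, 0.9), (0.5, -0.4, 0.8), (1.0, -0.5, 0.7),
    (1.5, -0.6, 0.9), (2.0,  0.2, 0.9), (2.5, -0.7, 0.4),  # low confidence
    (3.0, -0.6, 0.8), (3.5, -0.5, 0.9), (4.0,  0.3, 0.9),
]

VALENCE_THRESHOLD = -0.3  # assumed cutoff for "negative" affect
MIN_CONFIDENCE = 0.6      # drop frames the estimator is unsure about (occlusion)
MIN_DURATION_S = 1.0      # only report episodes lasting at least this long

episodes, start = [], None
for t, valence, confidence in frames:
    # "negative" means valence at or below the threshold, with enough confidence
    negative = confidence >= MIN_CONFIDENCE and not valence > VALENCE_THRESHOLD
    if negative and start is None:
        start = t                          # a candidate episode begins
    elif not negative and start is not None:
        if t - start >= MIN_DURATION_S:
            episodes.append((start, t))    # episode ended; keep if long enough
        start = None

if start is not None and frames[-1][0] - start >= MIN_DURATION_S:
    episodes.append((start, frames[-1][0]))  # episode still open at end of stream

print("Sustained low-valence episodes:", episodes)
</preformat>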
</sec>
<sec id="s4-5">
<label>4.5</label>
<title>Multimodal data integration in VR-integrated neuroscience education</title>
<p>The integration of eye tracking and facial expression recognition represents a leading approach within Multimodal Learning Analytics (MMLA), grounded in the view that learning arises from interacting cognitive and affective processes that cannot be fully captured by a single modality (<xref ref-type="bibr" rid="B12">Hern&#xe1;ndez-Mustieles et al., 2024</xref>). In VR-integrated neuroscience education, this multimodal approach holds promise for informing the development of adaptive learning systems, contingent on further empirical validation (<xref ref-type="bibr" rid="B20">Lai et al., 2013</xref>). In principle, such integration enables real-time adaptation of VR environments, for example, by dynamically adjusting the complexity of neuroanatomical models when sustained gaze fixation and affective signals indicate excessive cognitive load (<xref ref-type="bibr" rid="B21">L&#xe9;cuyer et al., 2008</xref>).</p>
<p>However, substantial challenges remain. These include the technical difficulty of synchronizing heterogeneous data streams and the methodological complexity of fusing them into coherent analytical models (<xref ref-type="bibr" rid="B4">Chango et al., 2022</xref>; <xref ref-type="bibr" rid="B25">Qiu et al., 2023</xref>). Moreover, developing validated computational models that reliably infer learning states across diverse learners remains a critical barrier to scalable, biometric-driven immersive education (<xref ref-type="bibr" rid="B7">D&#x2019;Mello et al., 2018</xref>). An elementary example of one such integration step is sketched below.</p>
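<p>The Python sketch below aligns two hypothetical streams recorded at different rates, gaze samples and facial-affect estimates, onto a common timeline using nearest-preceding-timestamp matching with pandas, and derives a deliberately crude combined indicator. The stream contents, sampling rates, matching tolerance, and thresholds are assumptions for illustration; real deployments additionally require cross-device clock synchronization and empirically validated fusion models.</p>
<preformat preformat-type="code">import numpy as np
import pandas as pd

# Illustrative sketch: fuse two hypothetical streams sampled at different
# rates. In practice both devices must first share a synchronized clock.
gaze = pd.DataFrame({
    "t": [0.00, 0.02, 0.04, 0.06, 0.08, 0.10],   # 50 Hz gaze samples
    "aoi": ["cortex", "cortex", "thalamus", "thalamus", "cortex", "cortex"],
    "pupil_mm": [3.1, 3.2, 3.5, 3.7, 3.4, 3.3],
})
affect = pd.DataFrame({
    "t": [0.00, 0.033, 0.066, 0.099],             # 30 Hz affect estimates
    "valence": [0.1, -0.4, -0.2, 0.0],
})

# Attach to each gaze sample the most recent affect estimate within 40 ms.
fused = pd.merge_asof(gaze, affect, on="t", direction="backward", tolerance=0.04)

# A crude combined indicator: elevated pupil dilation together with negative
# valence, flagged per sample (assumed, illustrative thresholds).
fused["overload_flag"] = np.logical_and(fused["pupil_mm"].ge(3.5),
                                        fused["valence"].le(-0.3))
print(fused)
</preformat>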
</sec>
</sec>
<sec sec-type="discussion" id="s5">
<label>5</label>
<title>Discussion</title>
<p>This narrative mini-review highlights an emerging methodological emphasis in VR-based neuroscience education research: a growing shift from evaluating learning outcomes alone toward examining the cognitive and affective processes that underlie learning in immersive environments (<xref ref-type="bibr" rid="B26">Radianti et al., 2020</xref>; <xref ref-type="bibr" rid="B23">Makransky and Petersen, 2021</xref>). Instead of representing a paradigm shift, this trend reflects increasing recognition that understanding how learners engage with complex virtual representations is essential for evidence-based instructional design (<xref ref-type="bibr" rid="B11">Hamilton et al., 2021</xref>). Eye tracking and facial expression recognition offer promising, complementary tools for probing these mechanisms, provided they are applied with appropriate theoretical grounding and methodological caution.</p>
<p>Eye-tracking measures&#x2014;including fixation duration, scan paths, and gaze distribution&#x2014;have been widely used to infer visual attention and information-seeking strategies in immersive learning tasks (<xref ref-type="bibr" rid="B6">Duchowski, 2018</xref>; <xref ref-type="bibr" rid="B20">Lai et al., 2013</xref>). In VR-based neuroanatomy and simulation contexts, these metrics offer insight into how learners allocate attention to spatially complex structures, how visual strategies differ between novices and experts, and how interface design shapes attentional guidance (<xref ref-type="bibr" rid="B33">Wainman et al., 2021</xref>; <xref ref-type="bibr" rid="B17">Kiili et al., 2014</xref>). When combined with pupillometry, eye tracking can also provide indirect indicators of cognitive effort and load regulation during demanding spatial or procedural tasks (<xref ref-type="bibr" rid="B29">Souchet et al., 2022</xref>; <xref ref-type="bibr" rid="B22">Lee et al., 2023</xref>). However, these measures do not provide direct access to cognitive states: fixation patterns may reflect deep processing, confusion, or perceptual difficulty depending on task demands, while pupil dilation is influenced by lighting conditions, emotional arousal, and headset display properties (<xref ref-type="bibr" rid="B16">Just and Carpenter, 2013</xref>; <xref ref-type="bibr" rid="B18">Krejtz et al., 2018</xref>; <xref ref-type="bibr" rid="B1">Adhanom et al., 2023</xref>). Accordingly, interpretations must be anchored in task design, theoretical expectations, and triangulation with additional data sources.</p>
<p>Facial expression recognition provides a complementary window into the affective dimension of learning, capturing emotional valence and arousal that are often inaccessible through self-report alone (<xref ref-type="bibr" rid="B7">D&#x2019;Mello et al., 2018</xref>; <xref ref-type="bibr" rid="B34">Wang et al., 2022</xref>). In immersive VR neuroscience education&#x2014;where learners engage with unfamiliar and cognitively demanding content&#x2014;affective states such as curiosity, frustration, or disengagement can meaningfully shape engagement and persistence (<xref ref-type="bibr" rid="B10">Gruber et al., 2014</xref>). While facial expression analysis may help identify affective patterns associated with engagement or difficulty, important limitations remain (<xref ref-type="bibr" rid="B19">Li and Deng, 2020</xref>). Facial expressions are indirect proxies for internal emotional states, vary across individuals and cultural contexts, and are technically constrained in VR by headset-induced facial occlusion and partial visibility (<xref ref-type="bibr" rid="B1">Adhanom et al., 2023</xref>; <xref ref-type="bibr" rid="B30">Tao et al., 2005</xref>). These considerations underscore the need for cautious, context-sensitive interpretation and validation within specific educational settings.</p>
<p>The integration of eye tracking and facial expression recognition within a multimodal learning analytics framework represents a promising direction for future research in VR-based neuroscience education (<xref ref-type="bibr" rid="B12">Hern&#xe1;ndez-Mustieles et al., 2024</xref>). Multimodal approaches acknowledge that learning arises from interacting cognitive and affective processes and that no single biometric measure can adequately capture this complexity (<xref ref-type="bibr" rid="B7">D&#x2019;Mello et al., 2018</xref>). When temporally synchronized and theoretically grounded, combined gaze and affective data may support more nuanced interpretations of learner states, such as distinguishing transient confusion that precedes insight from sustained frustration indicative of excessive cognitive load (<xref ref-type="bibr" rid="B20">Lai et al., 2013</xref>; <xref ref-type="bibr" rid="B4">Chango et al., 2022</xref>). At the same time, multimodal integration introduces additional challenges, including sensor synchronization, analytic complexity, and the risk of overinterpreting biometric signals in the absence of sufficient empirical validation (<xref ref-type="bibr" rid="B25">Qiu et al., 2023</xref>).</p>
<p>Beyond technical considerations, several broader methodological and ethical issues warrant attention. Methodologically, there is a pressing need for greater standardization in the collection, processing, and reporting of biometric data in VR learning research, particularly with respect to calibration procedures, environmental controls, and analytic transparency (<xref ref-type="bibr" rid="B1">Adhanom et al., 2023</xref>; <xref ref-type="bibr" rid="B5">Cheng et al., 2016</xref>). Ethically, the continuous collection of biometric and affective data raises concerns related to privacy, informed consent, data security, and the potential normalization of learner surveillance (<xref ref-type="bibr" rid="B3">Baniasadi et al., 2020</xref>; <xref ref-type="bibr" rid="B28">Shum et al., 2019</xref>). These concerns are especially salient in educational settings and must be addressed through robust governance frameworks and learner-centered design principles.</p>
<p>Taken together, the reviewed literature suggests that biometric sensing can meaningfully enrich mechanism-oriented research in VR neuroscience education when applied judiciously. Rather than serving as definitive indicators of attention, cognitive load, or emotion, eye tracking and facial expression recognition are best understood as context-sensitive, complementary measures whose interpretive value depends on theoretical grounding and methodological rigor (<xref ref-type="bibr" rid="B26">Radianti et al., 2020</xref>; <xref ref-type="bibr" rid="B23">Makransky and Petersen, 2021</xref>). Future research should prioritize longitudinal designs, explicit construct validation, and interdisciplinary collaboration among educators, neuroscientists, learning scientists, and ethicists to translate biometric insights into actionable and ethically responsible design principles for immersive neuroscience education.</p>
</sec>
</body>
<back>
<sec sec-type="author-contributions" id="s6">
<title>Author contributions</title>
<p>XD: Funding acquisition, Writing &#x2013; original draft, Project administration, Writing &#x2013; review and editing, Formal Analysis, Software, Methodology, Visualization, Supervision, Investigation, Validation, Conceptualization, Resources, Data curation.</p>
</sec>
<sec sec-type="COI-statement" id="s8">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s9">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<fn-group>
<fn fn-type="custom" custom-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/844875/overview">Rabindra Ratan</ext-link>, Michigan State University, United States</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1341919/overview">Kuo-Ting Huang</ext-link>, University of Pittsburgh, United States</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2547574/overview">Cristobal Rodolfo Guerra-Tamez</ext-link>, University of Monterrey, Mexico</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Adhanom</surname>
<given-names>I. B.</given-names>
</name>
<name>
<surname>MacNeilage</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Folmer</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Eye tracking in virtual reality: a broad review of applications and challenges</article-title>. <source>Virtual Real.</source> <volume>27</volume> (<issue>2</issue>), <fpage>1481</fpage>&#x2013;<lpage>1505</lpage>. <pub-id pub-id-type="doi">10.1007/s10055-022-00738-z</pub-id>
<pub-id pub-id-type="pmid">37621305</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aland</surname>
<given-names>R. C.</given-names>
</name>
<name>
<surname>Hugo</surname>
<given-names>H. J.</given-names>
</name>
<name>
<surname>Battle</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Donkin</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>McDonald</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>McGowan</surname>
<given-names>H.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>A plethora of choices: an anatomists&#x2019; practical perspectives for the selection of digital anatomy resources</article-title>. <source>Smart Learn Environ.</source> <volume>10</volume> (<issue>1</issue>), <fpage>66</fpage>. <pub-id pub-id-type="doi">10.1186/s40561-023-00285-3</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Baniasadi</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Ayyoubzadeh</surname>
<given-names>S. M.</given-names>
</name>
<name>
<surname>Mohammadzadeh</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Challenges and practical considerations in applying virtual reality in medical education and treatment</article-title>. <source>Oman Med. J.</source> <volume>35</volume> (<issue>3</issue>), <fpage>e125</fpage>. <pub-id pub-id-type="doi">10.5001/omj.2020.43</pub-id>
<pub-id pub-id-type="pmid">32489677</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chango</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Lara</surname>
<given-names>J. A.</given-names>
</name>
<name>
<surname>Cerezo</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Romero</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>A review on data fusion in multimodal learning analytics and educational data mining</article-title>. <source>WIREs Data Min and Knowl.</source> <volume>12</volume> (<issue>4</issue>), <fpage>e1458</fpage>. <pub-id pub-id-type="doi">10.1002/widm.1458</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cheng</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Kessler</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Mackinnon</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Chang</surname>
<given-names>T. P.</given-names>
</name>
<name>
<surname>Nadkarni</surname>
<given-names>V. M.</given-names>
</name>
<name>
<surname>Hunt</surname>
<given-names>E. A.</given-names>
</name>
<etal/>
</person-group> (<year>2016</year>). <article-title>Reporting guidelines for health care simulation research: extensions to the CONSORT and STROBE statements</article-title>. <source>Adv. Simul.</source> <volume>1</volume> (<issue>1</issue>), <fpage>25</fpage>. <pub-id pub-id-type="doi">10.1186/s41077-016-0025-y</pub-id>
<pub-id pub-id-type="pmid">29449994</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Duchowski</surname>
<given-names>A. T.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Gaze-based interaction: a 30 year retrospective</article-title>. <source>Comput. and Graph.</source> <volume>73</volume>, <fpage>59</fpage>&#x2013;<lpage>69</lpage>. <pub-id pub-id-type="doi">10.1016/j.cag.2018.04.002</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>D&#x2019;Mello</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kappas</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Gratch</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>The affective computing approach to affect measurement</article-title>. <source>Emot. Rev.</source> <volume>10</volume> (<issue>2</issue>), <fpage>174</fpage>&#x2013;<lpage>183</lpage>. <pub-id pub-id-type="doi">10.1177/1754073917696583</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ekman</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Friesen</surname>
<given-names>W. V.</given-names>
</name>
</person-group> (<year>1978</year>). <article-title>Facial action coding system</article-title>. <source>Environ. Psychol. and Nonverbal Behav.</source> <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="https://psycnet.apa.org/doiLanding?">https://psycnet.apa.org/doiLanding?</ext-link> (Accessed September 05, 2025)</comment>.<pub-id pub-id-type="doi">10.1037/t27734-000</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Garc&#xed;a&#x2010;Robles</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Cort&#xe9;s&#x2010;P&#xe9;rez</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Nieto&#x2010;Esc&#xe1;mez</surname>
<given-names>F. A.</given-names>
</name>
<name>
<surname>Garc&#xed;a&#x2010;L&#xf3;pez</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Obrero&#x2010;Gait&#xe1;n</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Osuna&#x2010;P&#xe9;rez</surname>
<given-names>M. C.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Immersive virtual reality and augmented reality in anatomy education: a systematic review and meta&#x2010;analysis</article-title>. <source>Anat. Sci. Educ.</source> <volume>17</volume> (<issue>3</issue>), <fpage>514</fpage>&#x2013;<lpage>528</lpage>. <pub-id pub-id-type="doi">10.1002/ase.2397</pub-id>
<pub-id pub-id-type="pmid">38344900</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gruber</surname>
<given-names>M. J.</given-names>
</name>
<name>
<surname>Gelman</surname>
<given-names>B. D.</given-names>
</name>
<name>
<surname>Ranganath</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>States of curiosity modulate hippocampus-dependent learning <italic>via</italic> the dopaminergic circuit</article-title>. <source>Neuron</source> <volume>84</volume> (<issue>2</issue>), <fpage>486</fpage>&#x2013;<lpage>496</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuron.2014.08.060</pub-id>
<pub-id pub-id-type="pmid">25284006</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hamilton</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>McKechnie</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Edgerton</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Wilson</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Immersive virtual reality as a pedagogical tool in education: a systematic literature review of quantitative learning outcomes and experimental design</article-title>. <source>J. Comput. Educ.</source> <volume>8</volume> (<issue>1</issue>), <fpage>1</fpage>&#x2013;<lpage>32</lpage>. <pub-id pub-id-type="doi">10.1007/s40692-020-00169-2</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hern&#xe1;ndez-Mustieles</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Lima-Carmona</surname>
<given-names>Y. E.</given-names>
</name>
<name>
<surname>Pacheco-Ram&#xed;rez</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Mendoza-Armenta</surname>
<given-names>A. A.</given-names>
</name>
<name>
<surname>Romero-G&#xf3;mez</surname>
<given-names>J. E.</given-names>
</name>
<name>
<surname>Cruz-G&#xf3;mez</surname>
<given-names>C. F.</given-names>
</name>
<etal/>
</person-group> (<year>2024</year>). <article-title>Wearable biosensor technology in education: a systematic review</article-title>. <source>Sensors</source> <volume>24</volume> (<issue>8</issue>), <fpage>2437</fpage>. <pub-id pub-id-type="doi">10.3390/s24082437</pub-id>
<pub-id pub-id-type="pmid">38676053</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Holmqvist</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Andersson</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Eye tracking: a comprehensive guide to methods, paradigms and measures</article-title>. <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.academia.edu/download/43856150/Eye_tracking_A_comprehensive_guide_to_me20160318-12093-1jy8omr.pdf">https://www.academia.edu/download/43856150/Eye_tracking_A_comprehensive_guide_to_me20160318-12093-1jy8omr.pdf</ext-link> (Accessed January 19, 2025)</comment>.</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="web">
<person-group person-group-type="author">
<name>
<surname>Jarodzka</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Gruber</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Holmqvist</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Eye tracking in educational science: theoretical frameworks and research agendas</article-title>. <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.utupub.fi/bitstream/handle/10024/159534/2959-12375-1-PB.pdf?sequence=1">https://www.utupub.fi/bitstream/handle/10024/159534/2959-12375-1-PB.pdf?sequence&#x3d;1</ext-link> (Accessed January 19, 2025)</comment>.</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jensen</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Konradsen</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>A review of the use of virtual reality head-mounted displays in education and training</article-title>. <source>Educ. Inf. Technol.</source> <volume>23</volume> (<issue>4</issue>), <fpage>1515</fpage>&#x2013;<lpage>1529</lpage>. <pub-id pub-id-type="doi">10.1007/s10639-017-9676-0</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Just</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Carpenter</surname>
<given-names>P. A.</given-names>
</name>
</person-group> (<year>2013</year>). &#x201c;<article-title>The intensity dimension of thought: pupillometric indices of sentence processing</article-title>,&#x201d; in <source>Reading and language processing</source> (<publisher-name>Hove, United Kingdom: Psychology Press</publisher-name>), <fpage>182</fpage>&#x2013;<lpage>211</lpage>. <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.taylorfrancis.com/chapters/edit/10.4324/9781315799445-9/intensity-dimension-thought-marcel-adam-patricia-carpenter">https://www.taylorfrancis.com/chapters/edit/10.4324/9781315799445-9/intensity-dimension-thought-marcel-adam-patricia-carpenter</ext-link> (Accessed November 01, 2025)</comment>.</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="web">
<person-group person-group-type="author">
<name>
<surname>Kiili</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Ketamo</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Kickmeier-Rust</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Eye tracking in game-based learning research and game design</article-title>. <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="https://proforis.phsg.ch/handle/20.500.14111/3339">https://proforis.phsg.ch/handle/20.500.14111/3339</ext-link> (Accessed September 05, 2025)</comment>.</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Krejtz</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Duchowski</surname>
<given-names>A. T.</given-names>
</name>
<name>
<surname>Niedzielska</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Biele</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Krejtz</surname>
<given-names>I.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Eye tracking cognitive load using pupil diameter and microsaccades with fixed gaze</article-title>. <source>PloS One</source> <volume>13</volume> (<issue>9</issue>), <fpage>e0203629</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0203629</pub-id>
<pub-id pub-id-type="pmid">30216385</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Deng</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Deep facial expression recognition: a survey</article-title>. <source>IEEE Trans. Affect. Comput.</source> <volume>13</volume> (<issue>3</issue>), <fpage>1195</fpage>&#x2013;<lpage>1215</lpage>. <pub-id pub-id-type="doi">10.1109/taffc.2020.2981446</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lai</surname>
<given-names>M. L.</given-names>
</name>
<name>
<surname>Tsai</surname>
<given-names>M. J.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>F. Y.</given-names>
</name>
<name>
<surname>Hsu</surname>
<given-names>C. Y.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>T. C.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>S. W. Y.</given-names>
</name>
<etal/>
</person-group> (<year>2013</year>). <article-title>A review of using eye-tracking technology in exploring learning from 2000 to 2012</article-title>. <source>Educ. Res. Rev.</source> <volume>10</volume>, <fpage>90</fpage>&#x2013;<lpage>115</lpage>. <pub-id pub-id-type="doi">10.1016/j.edurev.2013.10.001</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>L&#xe9;cuyer</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Lotte</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Reilly</surname>
<given-names>R. B.</given-names>
</name>
<name>
<surname>Leeb</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Hirose</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Slater</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Brain-computer interfaces, virtual reality, and videogames</article-title>. <source>Computer</source> <volume>41</volume> (<issue>10</issue>), <fpage>66</fpage>&#x2013;<lpage>72</lpage>. <pub-id pub-id-type="doi">10.1109/MC.2008.410</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lee</surname>
<given-names>J. Y.</given-names>
</name>
<name>
<surname>de Jong</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Donkers</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Jarodzka</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>van Merri&#xeb;nboer</surname>
<given-names>J. J.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Measuring cognitive load in virtual reality training <italic>via</italic> pupillometry</article-title>. <source>IEEE Trans. Learn. Technol.</source> <volume>17</volume>, <fpage>704</fpage>&#x2013;<lpage>710</lpage>. <pub-id pub-id-type="doi">10.1109/tlt.2023.3326473</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Makransky</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Petersen</surname>
<given-names>G. B.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>The cognitive affective model of immersive learning (CAMIL): a theoretical research-based model of learning in immersive virtual reality</article-title>. <source>Educ. Psychol. Rev.</source> <volume>33</volume> (<issue>3</issue>), <fpage>937</fpage>&#x2013;<lpage>958</lpage>. <pub-id pub-id-type="doi">10.1007/s10648-020-09586-2</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Parong</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Mayer</surname>
<given-names>R. E.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Learning science in immersive virtual reality</article-title>. <source>J. Educ. Psychol.</source> <volume>110</volume> (<issue>6</issue>), <fpage>785</fpage>&#x2013;<lpage>797</lpage>. <pub-id pub-id-type="doi">10.1037/edu0000241</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Qiu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Mart&#xed;nez-S&#xe1;nchez</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Arias-S&#xe1;nchez</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Rashdi</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>External multi-modal imaging sensor calibration for sensor fusion: a review</article-title>. <source>Inf. Fusion</source> <volume>97</volume>, <fpage>101806</fpage>. <pub-id pub-id-type="doi">10.1016/j.inffus.2023.101806</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Radianti</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Majchrzak</surname>
<given-names>T. A.</given-names>
</name>
<name>
<surname>Fromm</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wohlgenannt</surname>
<given-names>I.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>A systematic review of immersive virtual reality applications for higher education: design elements, lessons learned, and research agenda</article-title>. <source>Comput. Educ.</source> <volume>147</volume>, <fpage>103778</fpage>. <pub-id pub-id-type="doi">10.1016/j.compedu.2019.103778</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shao</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Yuan</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Qian</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Ye</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Le Zhuang</surname>
<given-names>K.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Virtual reality technology for teaching neurosurgery of skull base tumor</article-title>. <source>BMC Med. Educ.</source> <volume>20</volume> (<issue>1</issue>), <fpage>3</fpage>. <pub-id pub-id-type="doi">10.1186/s12909-019-1911-5</pub-id>
<pub-id pub-id-type="pmid">31900135</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shum</surname>
<given-names>S. B.</given-names>
</name>
<name>
<surname>Ferguson</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Martinez-Maldonado</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Human-centred learning analytics</article-title>. <source>J. Learn. Anal.</source> <volume>6</volume> (<issue>2</issue>), <fpage>1</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.18608/jla.2019.62.1</pub-id>
</mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Souchet</surname>
<given-names>A. D.</given-names>
</name>
<name>
<surname>Philippe</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Lourdeaux</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Leroy</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Measuring visual fatigue and cognitive load <italic>via</italic> eye tracking while learning with virtual reality head-mounted displays: a review</article-title>. <source>Int. J. Hum.&#x2013;Comput. Interact.</source> <volume>38</volume> (<issue>9</issue>), <fpage>801</fpage>&#x2013;<lpage>824</lpage>. <pub-id pub-id-type="doi">10.1080/10447318.2021.1976509</pub-id>
</mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Tao</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Tan</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Picard</surname>
<given-names>R. W.</given-names>
</name>
</person-group> (<year>2005</year>). <article-title>Affective computing: a review</article-title>. In: <source>Affective computing and intelligent interaction</source>. <publisher-loc>Berlin, Heidelberg</publisher-loc>: <publisher-name>Springer Berlin Heidelberg</publisher-name>; editors <person-group person-group-type="editor">
<name>
<surname>Kittler</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Kleinberg</surname>
<given-names>J. M.</given-names>
</name>
<name>
<surname>Mattern</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Mitchell</surname>
<given-names>J. C.</given-names>
</name>
</person-group>, vol. <volume>3784</volume>. <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="http://link.springer.com/10.1007/11573548_125">http://link.springer.com/10.1007/11573548_125</ext-link> (Accessed May 06, 2025)</comment>.
</mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Van Engen</surname>
<given-names>K. J.</given-names>
</name>
<name>
<surname>McLaughlin</surname>
<given-names>D. J.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Eyes and ears: using eye tracking and pupillometry to understand challenges to speech recognition</article-title>. <source>Hear. Res.</source> <volume>369</volume>, <fpage>56</fpage>&#x2013;<lpage>66</lpage>. <pub-id pub-id-type="doi">10.1016/j.heares.2018.04.013</pub-id>
<pub-id pub-id-type="pmid">29801981</pub-id>
</mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Vickers</surname>
<given-names>J. N.</given-names>
</name>
<name>
<surname>Rodrigues</surname>
<given-names>S. T.</given-names>
</name>
<name>
<surname>Edworthy</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2000</year>). <article-title>Quiet eye and accuracy in the dart throw</article-title>. <source>Int. J. Sports Vis.</source> <volume>6</volume> (<issue>1</issue>), <fpage>30</fpage>&#x2013;<lpage>36</lpage>.</mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wainman</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Aggarwal</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Birk</surname>
<given-names>S. K.</given-names>
</name>
<name>
<surname>Gill</surname>
<given-names>J. S.</given-names>
</name>
<name>
<surname>Hass</surname>
<given-names>K. S.</given-names>
</name>
<name>
<surname>Fenesi</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Virtual dissection: an interactive anatomy learning tool</article-title>. <source>Anat. Sci. Educ.</source> <volume>14</volume> (<issue>6</issue>), <fpage>788</fpage>&#x2013;<lpage>798</lpage>. <pub-id pub-id-type="doi">10.1002/ase.2035</pub-id>
<pub-id pub-id-type="pmid">33185976</pub-id>
</mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Song</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Tao</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Liotta</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>X.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>A systematic review on affective computing: emotion models, databases, and recent advances</article-title>. <source>Inf. Fusion</source> <volume>83</volume>, <fpage>19</fpage>&#x2013;<lpage>52</lpage>. <pub-id pub-id-type="doi">10.1016/j.inffus.2022.03.009</pub-id>
</mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhao</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Xu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Jiang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Ding</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>The effectiveness of virtual reality-based technology on anatomy teaching: a meta-analysis of randomized controlled studies</article-title>. <source>BMC Med. Educ.</source> <volume>20</volume> (<issue>1</issue>), <fpage>127</fpage>. <pub-id pub-id-type="doi">10.1186/s12909-020-1994-z</pub-id>
<pub-id pub-id-type="pmid">32334594</pub-id>
</mixed-citation>
</ref>
</ref-list>
</back>
</article>