<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Virtual Real.</journal-id>
<journal-title>Frontiers in Virtual Reality</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Virtual Real.</abbrev-journal-title>
<issn pub-type="epub">2673-4192</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1540406</article-id>
<article-id pub-id-type="doi">10.3389/frvir.2025.1540406</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Virtual Reality</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Effects of interactive loading interfaces for virtual reality game environments on time perception, cognitive load, and emotions</article-title>
<alt-title alt-title-type="left-running-head">Huang et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/frvir.2025.1540406">10.3389/frvir.2025.1540406</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Huang</surname>
<given-names>Yi-Ting</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1791393/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Hsu</surname>
<given-names>Chih-Chieh</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Wang</surname>
<given-names>Tzu-Hsuan</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2914376/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Department of Interaction Design</institution>, <institution>National Taipei University of Technology (Taipei Tech)</institution>, <addr-line>Taipei</addr-line>, <country>Taiwan</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Doctoral Program in Design</institution>, <institution>College of Design</institution>, <institution>National Taipei University of Technology (Taipei Tech)</institution>, <addr-line>Taipei</addr-line>, <country>Taiwan</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/437425/overview">Min Li</ext-link>, Xi&#x2019;an Jiaotong University, China</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2408804/overview">Marta Mondellini</ext-link>, National Research Council (CNR), Italy</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/318610/overview">Jean Botev</ext-link>, University of Luxembourg, Luxembourg</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Tzu-Hsuan Wang, <email>winnie9874@gmail.com</email>
</corresp>
</author-notes>
<pub-date pub-type="epub">
<day>15</day>
<month>05</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>6</volume>
<elocation-id>1540406</elocation-id>
<history>
<date date-type="received">
<day>05</day>
<month>12</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>22</day>
<month>04</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2025 Huang, Hsu and Wang.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Huang, Hsu and Wang</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>As virtual reality (VR) games are data-heavy, not only do they take time to load, but wearing a VR headset while waiting can also cause anxiety and agitation in players. This study thus focused on the design of VR loading interfaces, which is an underexplored area in VR research. We investigated how different levels of interaction and visual stimulation affect users&#x2019; psychological and behavioral responses. Drawing on theories of time perception and the stimulus-organism-response (SOR) model, we designed loading interfaces for a VR headset integrated with a physiological data collection module. Questionnaire surveys were also used to collect data from 58 participants on their emotions, time perceptions, and cognitive load experience with interactive and non-interactive VR loading interfaces. The results showed that interactive interfaces shortened users&#x2019; perception of waiting times, and increased positive emotions and decreased negative emotions while users waited for the game to load. Additionally, users of interactive interfaces were less negatively affected by visual stimulation, whereas in non-interactive interfaces, visual stimulation improved time perception and emotional response. These findings expand the application of SOR theory to VR waiting experiences and highlight the importance of interactivity and visual elements in optimizing the waiting experience, thus contributing to a better understanding of user needs in VR environments.</p>
</abstract>
<kwd-group>
<kwd>virtual reality</kwd>
<kwd>time perception</kwd>
<kwd>cognitive load</kwd>
<kwd>visual stimulation</kwd>
<kwd>loading interface</kwd>
</kwd-group>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Virtual Reality and Human Behaviour</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<p>To illustrate the theory of relativity, Einstein said, <italic>&#x201c;Put your hand on a hot stove for a minute, and it seems like an hour; sit with a pretty girl for an hour, and it seems like a minute&#x201d;</italic> (<xref ref-type="bibr" rid="B141">Garson, 2014</xref>). This statement reflects how external factors influence an individual&#x2019;s perception of time. This phenomenon is especially evident in virtual reality (VR) environments. The immersive nature of VR typically reduces bodily awareness and enhances user engagement when they are actively participating in the environment, leading to a phenomenon known as time compression (<xref ref-type="bibr" rid="B83">Mullen and Davidenko, 2021</xref>; <xref ref-type="bibr" rid="B113">van der Ham et al., 2019</xref>), in which users underestimate the passage of time. However, time distortion can include expansion as well as compression. According to <xref ref-type="bibr" rid="B86">Niknam et al. (2024)</xref>, time perception is influenced by user engagement and the dynamic nature of the VR environment. Active users typically experience time compression, whereas passive users, especially in dynamic VR environments, may feel detached from their surroundings, resulting in time expansion.</p>
<p>When VR games are loading, users are restricted by the headsets but are not actively engaged. This can intensify the distortion of time perception, triggering strong negative emotions (<xref ref-type="bibr" rid="B50">Heidrich et al., 2020</xref>; <xref ref-type="bibr" rid="B51">Igarz&#xe1;bal et al., 2021</xref>). However, emotional impact is not limited to negativity. When users experience enjoyment, relaxation, or satisfaction while waiting for the game to load, they tend to pay less attention to the passage of time and thus perceive it as moving faster (<xref ref-type="bibr" rid="B122">Witowska et al., 2020</xref>). This aligns with the concept of flow state, where individuals enter an optimal experience characterized by deep immersion, pleasure, and diminished awareness of time (<xref ref-type="bibr" rid="B31">Csikszentmihalyi and Csikszentmihalyi, 1992</xref>; <xref ref-type="bibr" rid="B51">Igarz&#xe1;bal et al., 2021</xref>; <xref ref-type="bibr" rid="B88">Pedroli et al., 2018</xref>). Thus, when people fully engage in an enjoyable activity, they become less conscious of time (<xref ref-type="bibr" rid="B12">Block and Gruber, 2014</xref>). This means VR waiting experiences could be improved through the careful design of loading interfaces.</p>
<p>Since the &#x201c;Year of VR&#x201d; in 2016, VR devices have been rapidly commercialized (<xref ref-type="bibr" rid="B134">Cellan-Jones, 2016</xref>; <xref ref-type="bibr" rid="B147">Smith, 2016</xref>), and the integration of related technologies is expected to bring revolutionary changes to the gaming industry (<xref ref-type="bibr" rid="B146">Sarkar et al., 2024</xref>; <xref ref-type="bibr" rid="B145">Vorrink et al., 2024</xref>). Despite recent progress in evaluating VR user experiences across different regions and industries (<xref ref-type="bibr" rid="B94">Rhiu et al., 2020</xref>), there remains a significant gap in our understanding of VR waiting experiences (<xref ref-type="bibr" rid="B50">Heidrich et al., 2020</xref>). In particular, it seems important to identify factors affecting time perception (<xref ref-type="bibr" rid="B113">van der Ham et al., 2019</xref>), cognitive load, which is a widely-used evaluation metric for human-computer interactions (HCI) (<xref ref-type="bibr" rid="B28">Chiossi et al., 2022</xref>; <xref ref-type="bibr" rid="B59">Kleygrewe et al., 2024</xref>; <xref ref-type="bibr" rid="B60">Kosch et al., 2023</xref>), and emotions (<xref ref-type="bibr" rid="B8">Batistatou et al., 2022</xref>; <xref ref-type="bibr" rid="B15">Bosman et al., 2024</xref>; <xref ref-type="bibr" rid="B35">Dey et al., 2022</xref>; <xref ref-type="bibr" rid="B138">Dubovi, 2022</xref>; <xref ref-type="bibr" rid="B53">Jacucci, 2017</xref>). These variables require further examination within the context of VR loading interfaces. In user interface design, the loading interface often provides feedback to users during wait times after an operation is performed. In traditional media, some loading interfaces incorporate animations. 
It has been found that faster animations can reduce perceived wait times and enhance user satisfaction (<xref ref-type="bibr" rid="B84">Myers, 1985</xref>; <xref ref-type="bibr" rid="B102">S&#xf6;derstr&#xf6;m et al., 2018</xref>).</p>
<p>The highly interactive and immersive nature of VR allows users to notice more visual details for a more dynamic experience; however, it also increases the cognitive load on users (<xref ref-type="bibr" rid="B39">Fisher et al., 2018</xref>). The characteristics of VR hardware and non-interactive loading interfaces contribute to user distress. For example, users have reported feeling &#x201c;trapped&#x201d; inside the headset during waiting experiences (<xref ref-type="bibr" rid="B51">Igarz&#xe1;bal et al., 2021</xref>). Indeed, in some large-scale VR games, the waiting time for players to enter the game is usually more than 1&#xa0;min, which can cause cognitive friction for users.</p>
<p>While it is clear that optimizing VR waiting experiences has substantial potential for improving VR experiences (<xref ref-type="bibr" rid="B64">Lang, 2020</xref>), current research on loading interfaces is scarce, particularly regarding the specific effects of different levels of visual stimulation on users&#x2019; internal and external states. This study aimed to fill this research gap, including investigating how various levels of visual stimulation intensity in VR gaming environments affect users. Thus, our research questions are as follows:<list list-type="simple">
<list-item>
<p>RQ1: How do the interactivity of the VR interface and the length of the waiting time affect time perception and emotions?</p>
</list-item>
<list-item>
<p>RQ2: Is there a relationship between the intensity of visual stimulation and users&#x2019; time perception, emotions, and cognitive load during waiting?</p>
</list-item>
</list>
</p>
<p>We applied the attentional gate model and stimulus-organism-response (SOR) theory to develop our conceptual model. The attentional gate model highlights the critical role of attention in time perception (<xref ref-type="bibr" rid="B12">Block and Gruber, 2014</xref>). SOR theory posits that external stimuli can influence individuals to respond in a specific way (<xref ref-type="bibr" rid="B80">Mehrabian and Russell, 1974</xref>). SOR theory has been widely applied to the analysis of behavioral dynamics in extended reality (XR) technologies such as virtual tourism and sightseeing applications (<xref ref-type="bibr" rid="B57">Kim et al., 2020</xref>; <xref ref-type="bibr" rid="B66">Latifi et al., 2024</xref>; <xref ref-type="bibr" rid="B107">Surovaya et al., 2020</xref>; <xref ref-type="bibr" rid="B132">Zhu et al., 2023</xref>). It is a useful framework for determining how different levels of visual stimulation intensity affect internal states and external responses. This study extends the scope of SOR theory in pursuit of a more detailed understanding of the interactions among stimuli, individuals, and responses in the context of VR loading interfaces. Our findings on the psychological mechanisms of VR interactive interfaces suggest practical guidelines for game design to optimize player experiences during game loading.</p>
</sec>
<sec id="s2">
<title>2 Literature review and hypothesis development</title>
<sec id="s2-1">
<title>2.1 Overview of research on VR user experiences</title>
<p>Since 2016, VR technology has rapidly matured as a subset of XR, exhibiting accelerated and diversified applications across industries (<xref ref-type="bibr" rid="B21">Chen et al., 2020</xref>; <xref ref-type="bibr" rid="B115">Velev and Zlateva, 2017</xref>). However, the success of VR technology relies not only on advancements in hardware and software but also on user perception and interaction experiences. Research has shown that presence and comfort influence XR experiences (<xref ref-type="bibr" rid="B81">Mondellini et al., 2022</xref>). Therefore, user experience in VR can be assessed through indicators such as presence, workload, usability, flow, and latency.</p>
<p>Existing studies offer valuable insights into topics such as interface design, cognitive load, time perception, and emotional impact. For instance, <xref ref-type="bibr" rid="B9">Bi et al. (2024)</xref> examined how scene-switching and time visualization methods (e.g., electronic clock vs. bomb countdown) affect creativity during brainstorming activities, highlighting how time distortion in VR environments can influence user experience and task fluidity. <xref ref-type="bibr" rid="B49">Hartfill et al. (2024)</xref> found that cognitive load decreases when users perceive more control over their virtual avatars, contributing to time compression. Meanwhile, research by <xref ref-type="bibr" rid="B20">Che et al. (2025)</xref>, <xref ref-type="bibr" rid="B114">van Weelden et al. (2024)</xref>, and <xref ref-type="bibr" rid="B34">De Witte et al. (2024)</xref> suggests that higher levels of immersion do not always enhance user experience, as excessive immersion can lead to increased cognitive load or discomfort. <xref ref-type="bibr" rid="B7">Bartyzel et al. (2025)</xref> and <xref ref-type="bibr" rid="B58">Kim et al. (2025)</xref> analyzed user performance, workload, and subjective experiences in VR training, offering valuable insights for future VR training system designs.</p>
<p>Given the expanding scope of this field, a comprehensive understanding of user experience is crucial (<xref ref-type="bibr" rid="B94">Rhiu et al., 2020</xref>). We adopted the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) protocol (see <ext-link ext-link-type="uri" xlink:href="http://prisma-statement.org/">http://prisma-statement.org</ext-link>) to systematically review current research on VR user experience. <xref ref-type="fig" rid="F1">Figure 1</xref> describes our screening process. Our objective was to analyze studies examining the impact of VR design on user experience (UX), cognitive load, emotion, and time perception. As <xref ref-type="bibr" rid="B106">Suh and Prophet (2018)</xref> recommended, limiting the search to a single database ensures reproducibility, rigor, and transparency while mitigating inconsistencies caused by variations in search functions and algorithms.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>PRISMA flow diagram of selection procedure for systematic literature review.</p>
</caption>
<alt-text>Flowchart showing the PRISMA selection process for a systematic literature review on VR user experience. Initially, 342 records were identified. Then, 41 were excluded by publication date, 152 by irrelevant abstracts, and 32 full texts not related to VR design impact. 15 studies were included.</alt-text>
<graphic xlink:href="frvir-06-1540406-g001.tif"/>
</fig>
<p>We conducted a keyword search in the Scopus bibliographic database using the following search terms: (&#x201c;VR&#x201d; OR &#x201c;virtual reality&#x201d;), with (&#x201c;UX&#x201d; OR &#x201c;user experience&#x201d; OR &#x201c;usability&#x201d;), (&#x201c;interface&#x201d; OR &#x201c;user interface&#x201d;), (&#x201c;emotion&#x2a;&#x201d;), (&#x201c;cognitive load&#x201d; OR &#x201c;cognit&#x2a;&#x201d; OR &#x201c;workload&#x201d;), (&#x201c;time perception&#x201d;), and (&#x201c;design&#x201d;). The initial search was performed on 19 November 2024. As 2016 is widely recognized as the year in which VR started (<xref ref-type="bibr" rid="B21">Chen et al., 2020</xref>; <xref ref-type="bibr" rid="B115">Velev and Zlateva, 2017</xref>), only literature published in English from 2016 was included. To ensure relevance, there was a focus on the subject areas of computer science and social science. Note that this review excluded surveys, editorials, conference papers, and documents without abstracts or full texts. Initially, 342 documents met the search criteria. After non-compliant documents were removed, 61 underwent a full-text review, resulting in 15 relevant UX studies for VR design.</p>
<p>
<xref ref-type="table" rid="T1">Table 1</xref> summarizes selected literature discussing user experiences in VR. These studies span various countries and industries, including entertainment, healthcare, education, gaming, and social applications. Topics include research on visual stimulation intensity in VR environments (<xref ref-type="bibr" rid="B4">Asish et al., 2022</xref>; <xref ref-type="bibr" rid="B8">Batistatou et al., 2022</xref>; <xref ref-type="bibr" rid="B28">Chiossi et al., 2022</xref>; <xref ref-type="bibr" rid="B67">Latini et al., 2024</xref>), multimodal interfaces (<xref ref-type="bibr" rid="B36">Dzardanova et al., 2024</xref>; <xref ref-type="bibr" rid="B53">Jacucci, 2017</xref>; <xref ref-type="bibr" rid="B124">Yuan et al., 2023</xref>), auditory stimulation (<xref ref-type="bibr" rid="B15">Bosman et al., 2024</xref>; <xref ref-type="bibr" rid="B90">Picard et al., 2023</xref>), physiological signals (<xref ref-type="bibr" rid="B91">Qu et al., 2022</xref>; <xref ref-type="bibr" rid="B92">Raees and Ullah, 2020</xref>), dissimilar avatars (<xref ref-type="bibr" rid="B26">Cheymol et al., 2023</xref>), and the number of users in VR environments (<xref ref-type="bibr" rid="B10">Birt and Vasilevski, 2021</xref>). Despite significant progress, many research gaps remain in interface design. For example, researchers have investigated waiting experiences in mobile apps (<xref ref-type="bibr" rid="B23">Chen and Li, 2022</xref>; <xref ref-type="bibr" rid="B24">Cheng et al., 2023</xref>; <xref ref-type="bibr" rid="B89">Pibernik et al., 2023</xref>) and customer service robots (<xref ref-type="bibr" rid="B121">Wintersberger et al., 2020</xref>); however, waiting experiences in VR devices have yet to be examined (<xref ref-type="bibr" rid="B50">Heidrich et al., 2020</xref>). As previously mentioned, existing HCI models may be insufficient to fully explain the complexities of VR environments. 
UX research can contribute to refining our understanding by empirically investigating ways to enhance user satisfaction, reduce cognitive load and adverse effects, inform VR system development and design, establish standardized evaluation methods, and explore multimodal sensory interactions (<xref ref-type="bibr" rid="B81">Mondellini et al., 2022</xref>). Therefore, UX research should be considered essential in the development of any VR application.</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Selected studies on VR user experience.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Authors</th>
<th align="left">Title</th>
<th align="left">Objective</th>
<th align="left">Country</th>
<th align="left">Industry</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">
<xref ref-type="bibr" rid="B67">Latini et al. (2024)</xref>
</td>
<td align="left">Investigating the impact of greenery elements in office environments on cognitive performance, visual attention, and distraction: an eye-tracking pilot study in virtual reality</td>
<td align="left">Exploring the impact of greenery elements in office environments on cognitive performance, visual attention, and distraction</td>
<td align="left">Italy</td>
<td align="left">Architecture and Design</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B36">Dzardanova et al. (2024)</xref>
</td>
<td align="left">Exploring the impact of non-verbal cues on user experience in immersive virtual reality</td>
<td align="left">Investigating how non-verbal cues (such as body movements and facial expressions) affect user experience in immersive VR</td>
<td align="left">Greece</td>
<td align="left">Social Games</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B15">Bosman et al. (2024)</xref>
</td>
<td align="left">The effect of audio on the experience in virtual reality: a scoping review</td>
<td align="left">Exploring the application of audio in VR and its impact on user experience</td>
<td align="left">Norway</td>
<td align="left">Entertainment, Games, Education</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B124">Yuan et al. (2023)</xref>
</td>
<td align="left">MEinVR: Multimodal interaction techniques in immersive exploration</td>
<td align="left">Examining multimodal interaction techniques to explore 3D molecular data in VR</td>
<td align="left">China</td>
<td align="left">Medical</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B90">Picard et al. (2023)</xref>
</td>
<td align="left">Rhythmic Stimuli and Time Experience in Virtual Reality</td>
<td align="left">Investigating how rhythmic stimuli in VR affect time experience and task performance</td>
<td align="left">Luxembourg</td>
<td align="left">Training, Education</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B26">Cheymol et al. (2023)</xref>
</td>
<td align="left">Beyond my Real Body: Characterization, Impacts, Applications and Perspectives of &#x2018;Dissimilar&#x2019; Avatars in Virtual Reality</td>
<td align="left">Exploring how using dissimilar avatars (different from the user&#x2019;s appearance) in VR affects user experience, including interaction, perception, and behavior changes</td>
<td align="left">France</td>
<td align="left">Entertainment, Games, Education</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B4">Asish et al. (2022)</xref>
</td>
<td align="left">Detecting distracted students in educational VR environments using machine learning on eye gaze data</td>
<td align="left">Investigating student distraction in VR environments and classifying distraction levels using machine learning on eye gaze data</td>
<td align="left">United States</td>
<td align="left">Education</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B28">Chiossi et al. (2022)</xref>
</td>
<td align="left">Virtual Reality Adaptation Using Electrodermal Activity to Support the User Experience</td>
<td align="left">Exploring how physiological adaptation systems in VR adjust user experience through electrodermal activity (EDA) analysis</td>
<td align="left">Germany</td>
<td align="left">Education, Training</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B8">Batistatou et al. (2022)</xref>
</td>
<td align="left">Virtual Reality to Evaluate the Impact of Colorful Interventions and Nature Elements on Spontaneous Walking, Gaze, and Emotion</td>
<td align="left">Evaluating the impact of colorful ground markings and green environments in urban settings on walking speed, gaze behavior, and emotional state using VR</td>
<td align="left">France</td>
<td align="left">Design, Architecture</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B109">Tastan et al. (2022)</xref>
</td>
<td align="left">Using handheld user interface and direct manipulation for architectural modeling in immersive virtual reality: An exploratory study</td>
<td align="left">Exploring two modeling methods for architectural models in VR: handheld user interfaces (HUI) and direct manipulation (DM)</td>
<td align="left">Turkey</td>
<td align="left">Design, Architecture</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B35">Dey et al. (2022)</xref>
</td>
<td align="left">Effects of interacting with facial expressions and controllers in different virtual environments on presence, usability, affect, and neurophysiological signals</td>
<td align="left">Investigating the impact of interacting with facial expressions and handheld controllers in various VR environments on presence, usability, emotional response, and neurophysiological signals</td>
<td align="left">Australia</td>
<td align="left">Entertainment, Games, Education</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B91">Qu et al. (2022)</xref>
</td>
<td align="left">Bio-physiological-signals-based VR cybersickness detection</td>
<td align="left">Exploring how physiological signals can detect cybersickness in VR in real-time, using deep learning models to quantify these factors</td>
<td align="left">China</td>
<td align="left">VR Design</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B10">Birt and Vasilevski (2021)</xref>
</td>
<td align="left">Comparison of Single and Multiuser Immersive Mobile Virtual Reality Usability in Construction Education</td>
<td align="left">Comparing the usability of single-user and multi-user mobile immersive virtual reality (MUVR) in construction education</td>
<td align="left">Australia</td>
<td align="left">Education</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B92">Raees and Ullah (2020)</xref>
</td>
<td align="left">THE-3DI: Tracing head and eyes for 3D interactions: An interaction technique for virtual environments</td>
<td align="left">Exploring head and eye movements for 3D interaction in VR, primarily based on eye position and blinking</td>
<td align="left">Pakistan</td>
<td align="left">Education</td>
</tr>
<tr>
<td align="left">
<xref ref-type="bibr" rid="B53">Jacucci (2017)</xref>
</td>
<td align="left">Toward affective social interaction in VR</td>
<td align="left">Exploring how multimodal synthesis in VR can enhance emotional-social interaction and how to recognize and influence emotions</td>
<td align="left">Finland</td>
<td align="left">Entertainment, Games</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s2-2">
<title>2.2 Stimulus-organism-response (SOR) theory</title>
<p>SOR theory is a psychological model widely applied in consumer behavior research. Its core concept posits that a stimulus influences an individual, leading to a response (<xref ref-type="bibr" rid="B80">Mehrabian and Russell, 1974</xref>). According to <xref ref-type="bibr" rid="B52">Jacoby (2002)</xref>, the stimulus (S) represents the environment an individual encounters at a specific moment, while the organism (O) encompasses various internal states such as attitudes, beliefs, values, motivations, personality, knowledge, experiences, emotions, tendencies, and cognition. <xref ref-type="bibr" rid="B116">Vieira (2013)</xref> further explains that the response (R) reflects an individual&#x2019;s willingness or intention to enter or exit a particular environment. Later, <xref ref-type="bibr" rid="B11">Bitner (1992)</xref> incorporated cognition and physiology to expand the SOR theory to the service domain. Past studies have used SOR theory to elucidate behavioral dynamics in XR technologies (<xref ref-type="bibr" rid="B78">Mala Kalaiarasan et al., 2024</xref>) and to examine the impact of VR on service perception, emotions, and behavior. Examples include virtual presentations of tourist hotels (<xref ref-type="bibr" rid="B107">Surovaya et al., 2020</xref>), virtual tourism experiences (<xref ref-type="bibr" rid="B57">Kim et al., 2020</xref>; <xref ref-type="bibr" rid="B66">Latifi et al., 2024</xref>; <xref ref-type="bibr" rid="B132">Zhu et al., 2023</xref>), and museum exhibition experiences (<xref ref-type="bibr" rid="B19">Chang et al., 2018</xref>). Thus, SOR theory is an appropriate framework for the exploration of users&#x2019; perceptions and responses to VR stimuli. The current study is the first to apply this framework to VR game-loading interfaces.</p>
<p>
<xref ref-type="fig" rid="F2">Figure 2</xref> shows the research framework of the current study.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Research framework.</p>
</caption>
<alt-text>Diagram illustrating the study&#x2019;s research framework based on the Stimulus-Organism-Response (SOR) model. It maps how variables such as interactivity, waiting time, and visual stimulation influence internal states like time perception and emotion, leading to a response in cognitive load.</alt-text>
<graphic xlink:href="frvir-06-1540406-g002.tif"/>
</fig>
<p>This study used SOR theory to explore the psychological and behavioral dynamics of waiting experiences in VR games. Similar to previous studies that used specific designs as environmental stimuli (<xref ref-type="bibr" rid="B19">Chang et al., 2018</xref>; <xref ref-type="bibr" rid="B57">Kim et al., 2020</xref>), this study considers scene interactivity, waiting length, visual stimulation intensity, and background environments. Following <xref ref-type="bibr" rid="B42">Godefroit-Winkel et al. (2022)</xref> and <xref ref-type="bibr" rid="B105">Su et al. (2020)</xref>, this study viewed individual perception as an internal processing outcome, reflecting users&#x2019; subjective time experience and perceived emotions in response to various stimuli in VR environments. Additionally, this study includes cognitive load as a behavioral response, as measured by physiological signals collected using the VR headset. <xref ref-type="bibr" rid="B106">Suh and Prophet (2018)</xref> also considered cognitive overload a reaction in their systematic review of the applications of SOR theory to immersive technologies.</p>
</sec>
<sec id="s2-3">
<title>2.3 Interface design and time perceptions when waiting</title>
<sec id="s2-3-1">
<title>2.3.1 Interactivity of VR interface</title>
<p>Games are highly interactive by nature. Interactive gamified visual stimuli within a loading interface are more likely to reduce players&#x2019; perception of time spent waiting than does a general design (<xref ref-type="bibr" rid="B71">Li et al., 2020</xref>). The Japanese game company NAMCO published &#x201c;Ridge Racer&#x201d; in 1993, which included the function of playing mini-games in the loading interface. NAMCO in fact applied for a patent for this function (U.S. Patent No. U.S. 5,718,632A). In the game &#x201c;Bayonetta,&#x201d; a practice field function was added to the loading interface. Additionally, interface interactivity stimulates different emotional responses in users (<xref ref-type="bibr" rid="B53">Jacucci, 2017</xref>; <xref ref-type="bibr" rid="B100">&#x160;&#x137;ilters et al., 2023</xref>). For example, the Google Doodles on the Google search engine homepage have evolved to include various interactive games. These doodles commemorate events and individuals, increasing their interactivity (<xref ref-type="bibr" rid="B16">Britten, 2020</xref>), while conveying emotions through gameplay (<xref ref-type="bibr" rid="B33">de Carvalho et al., 2013</xref>).</p>
<p>As VR games represent a relatively new technology, there remains much room for improvement. Among the various VR applications, most of the loading interfaces are similar to those designed for traditional computer games. That is, they are primarily two-dimensional (2D) and non-interactive. For example, the game &#x201c;Robo Recall&#x201d; presents 2D characters on its loading interface (<xref ref-type="bibr" rid="B64">Lang, 2020</xref>). While game companies are attempting to diversify this content, the immersive properties of this media have not yet been fully exploited for loading interfaces. The current paper explores the potential for three-dimensional (3D) interactive loading interfaces to enhance the user experience. Evidence of this potential has been demonstrated by previous studies; for instance, 3D product models and advertising videos have been inserted into the loading interface of some VR games, allowing users to walk around and interact with the 3D models (<xref ref-type="bibr" rid="B74">Liu, 2017</xref>). <xref ref-type="bibr" rid="B123">Wu et al. (2022)</xref> also added 3D models to the loading interface of a VR game.</p>
<p>Time perception refers to an individual&#x2019;s subjective perception of time without external measuring tools, influenced by environment, attention, and biological clocks (<xref ref-type="bibr" rid="B62">Lamotte et al., 2012</xref>; <xref ref-type="bibr" rid="B79">Matthews and Meck, 2016</xref>). Research on time perception and judgment is typically categorized into two approaches: prospective timing, where participants know that they need to estimate time, and retrospective timing, where time estimation is unexpectedly requested after a stimulus or activity ends (<xref ref-type="bibr" rid="B79">Matthews and Meck, 2016</xref>). This study focuses on prospective timing, which relies on experienced duration (<xref ref-type="bibr" rid="B14">Block and Zakay, 1997</xref>) and is influenced by perception, attention, and memory processes&#x2014;key aspects examined in this review (<xref ref-type="bibr" rid="B14">Block and Zakay, 1997</xref>).</p>
<p>One of the most widely-applied models for prospective time estimation is the attentional gate model (AGM) (<xref ref-type="bibr" rid="B13">Block and Zakay, 1996</xref>; <xref ref-type="bibr" rid="B127">Zakay and Block, 1997</xref>; <xref ref-type="bibr" rid="B126">Zakay and Block, 1995</xref>), which posits that time perception is shaped by cognitive resource allocation. AGM hypothesizes the existence of a cognitive counter in the brain that records time pulses emitted by a pacemaker, with these pulses representing the passage of time. That is, attention functions as a gate that regulates the number of pulses passing through. When attention is focused on time itself, the gate remains open, leading to a slower perception of time. Conversely, when attention is diverted to other stimuli, the gate closes, causing time to appear to pass more quickly (<xref ref-type="bibr" rid="B126">Zakay and Block, 1995</xref>; <xref ref-type="bibr" rid="B128">Zakay and Hornik, 1991</xref>).</p>
<p>According to <xref ref-type="bibr" rid="B129">Zakay et al. (1983)</xref>, attentional allocation is crucial in subjective time estimation. When attention is directed toward non-temporal information (e.g., visual or auditory stimuli), perceived waiting time tends to shorten. In recent years, many studies in the field of HCI have leveraged this theory, designing rich non-temporal visual and auditory stimuli in loading interfaces to divert user attention and reduce perceived wait times (<xref ref-type="bibr" rid="B22">Chen and Li, 2020</xref>; <xref ref-type="bibr" rid="B61">Kurusathianpong and Tangmanee, 2018</xref>; <xref ref-type="bibr" rid="B69">Lee et al., 2017</xref>; <xref ref-type="bibr" rid="B130">Zhang et al., 2023</xref>). <xref ref-type="bibr" rid="B83">Mullen and Davidenko (2021)</xref> found that VR environments can induce a time compression effect, potentially due to a reduction in bodily awareness within VR. <xref ref-type="bibr" rid="B110">Unruh et al. (2021)</xref> further validated the applicability of AGM in VR, demonstrating that the presence of a virtual avatar can distract users from tracking time, resulting in time compression. These studies indicate that multiple factors, including bodily awareness, attentional allocation, and the design of non-temporal information, influence time perception in VR.</p>
<p>In recent years, the design of human-computer loading interfaces has primarily been founded on studies that have demonstrated that modifying the visual and auditory non-temporal stimuli in loading interfaces can direct users&#x2019; attention toward these sensory inputs, effectively shortening their subjective perception of waiting time (<xref ref-type="bibr" rid="B128">Zakay and Hornik, 1991</xref>). That is, users&#x2019; perceived wait duration can be altered by introducing appropriate stimuli during waiting periods. These findings provide the theoretical foundation for this study, enabling an exploration of how temporal psychology can be leveraged to reduce subjective time perception during waiting experiences in VR games.</p>
<p>As early as the 1990s, scholars proposed that 10&#xa0;s is the threshold for an acceptable waiting time in human-computer interactions (<xref ref-type="bibr" rid="B85">Nielsen, 1994</xref>). However, in the gaming field, waiting times are often longer than 10&#xa0;s (<xref ref-type="bibr" rid="B77">Lozano, 2022</xref>). Compared to traditional computer equipment, VR devices need to render images at higher resolutions and frame rates (<xref ref-type="bibr" rid="B131">Zhou and Popescu, 2024</xref>), leading to longer loading times. This can cause negative emotions, such as frustration (<xref ref-type="bibr" rid="B50">Heidrich et al., 2020</xref>).</p>
<p>Based on the above review and the identified research gaps, this study proposes the following hypotheses:<list list-type="simple">
<list-item>
<p>H1a: Interactive loading interfaces will result in shorter time perception than non-interactive loading interfaces.</p>
</list-item>
<list-item>
<p>H1b: Interactive loading interfaces elicit more positive emotional responses than non-interactive loading interfaces.</p>
</list-item>
<list-item>
<p>H2a: As waiting time increases, the difference in time perception between interactive and non-interactive loading interfaces will become more pronounced.</p>
</list-item>
<list-item>
<p>H2b: As waiting time increases, the difference in emotional responses between interactive and non-interactive loading interfaces will become more substantial.</p>
</list-item>
</list>
</p>
</sec>
<sec id="s2-3-2">
<title>2.3.2 Time perception and visual stimulation intensity</title>
<p>Compared to the physical world, people generally perceive time to pass faster in VR environments (<xref ref-type="bibr" rid="B113">van der Ham et al., 2019</xref>), which means waiting experiences can feel more boring in these environments (<xref ref-type="bibr" rid="B51">Igarz&#xe1;bal et al., 2021</xref>). To minimize VR loading time, studies have focused on reducing data transfer usage (<xref ref-type="bibr" rid="B2">Alhilal et al., 2024</xref>), while others have adjusted rendering smoothness (<xref ref-type="bibr" rid="B72">Liang et al., 2023</xref>; <xref ref-type="bibr" rid="B131">Zhou and Popescu, 2024</xref>). Researchers have also found that adding interactive elements to VR loading interfaces can alleviate negative emotions associated with waiting (<xref ref-type="bibr" rid="B50">Heidrich et al., 2020</xref>). In their study exploring time perception in VR with various sensory stimuli, <xref ref-type="bibr" rid="B90">Picard et al. (2023)</xref> discovered that rhythmic stimulation in VR environments accelerates time perception. <xref ref-type="bibr" rid="B73">Liao et al. (2020)</xref> found that adjusting visual and auditory zeitgebers&#x2014;temporal cues in VR environments that influence biological rhythms and time perception, such as lighting and ticking sounds&#x2014;can significantly affect users&#x2019; sense of time. Additionally, cognitive load plays a crucial role in shaping time perception. Their study utilized the dual-task theory to evaluate the impact of cognitive load on both time perception and presence in VR environments. Regarding visual stimulation intensity, some scholars found that richer environmental details in VR provide more engaging visual stimulation, helping players to lose track of time (<xref ref-type="bibr" rid="B75">Lofca et al., 2023</xref>).</p>
<p>
<xref ref-type="bibr" rid="B82">Mostajeran et al. (2023)</xref> compared the effects of an abstract forest with a more realistic one on players&#x2019; time perceptions. They found that players spent more time observing details in the realistic forest. This suggests that realistic details in a VR environment attract more attention and thus reduce the perception of time passing. Other studies have investigated how visual dynamics can reduce time perception, showing that fast-moving stimuli lengthen time perception compared to stationary stimuli (<xref ref-type="bibr" rid="B17">Brown, 1995</xref>; <xref ref-type="bibr" rid="B56">Kanai et al., 2006</xref>; <xref ref-type="bibr" rid="B63">Landeck et al., 2023</xref>; <xref ref-type="bibr" rid="B101">Skylark, 2011</xref>). For example, <xref ref-type="bibr" rid="B54">Jording et al. (2022)</xref> found that flashing particles similar to the rapid changes of the starry night sky successfully reduced players&#x2019; perception of time passing.</p>
<p>When designing VR content, it is essential to consider how to minimize pressure on users&#x2019; cognitive resources to reduce time perception. This study explored the visual elements that reduce time perception in VR and their effects on players during game loading. Based on the above literature review, we propose the following hypotheses:<list list-type="simple">
<list-item>
<p>H3a: Different levels of visual stimulation intensity in interactive loading interfaces will affect time perception differently.</p>
</list-item>
<list-item>
<p>H3b: Different levels of visual stimulation intensity in non-interactive loading interfaces will affect time perception differently.</p>
</list-item>
</list>
</p>
</sec>
</sec>
<sec id="s2-4">
<title>2.4 Visual stimulation intensity and emotions when waiting</title>
<p>Emotions are crucial in user research due to their influence on behavior, perception, and cognition to varying degrees (<xref ref-type="bibr" rid="B103">Somarathna et al., 2023</xref>), as well as on distortion of users&#x2019; time perception (<xref ref-type="bibr" rid="B32">Cui et al., 2023</xref>). <xref ref-type="bibr" rid="B41">Gable et al. (2022)</xref> found that the motivational direction of emotions affects time perception. For example, emotions associated with approach motivation, such as anger, can accelerate time perception due to strong goal orientation. Conversely, emotions linked to avoidance motivation can extend time perception, showing how emotions regulate time perception through their motivational direction. Emotional responses have been shown to enhance presence in VR (<xref ref-type="bibr" rid="B6">Ba&#xf1;os et al., 2004</xref>; <xref ref-type="bibr" rid="B95">Riva et al., 2007</xref>) and significantly impact the enjoyment and quality of the VR experience (<xref ref-type="bibr" rid="B120">Wienrich et al., 2018</xref>).</p>
<p>Past VR research has focused on how sensory environments influence emotional arousal. For example, <xref ref-type="bibr" rid="B8">Batistatou et al. (2022)</xref> found that participants experienced more pleasant emotions in green environments. <xref ref-type="bibr" rid="B113">van der Ham et al. (2019)</xref> noted that appropriate sound design could enhance users&#x2019; positive emotions or reinforce specific emotional states, aiding deeper connections with VR content and narratives. <xref ref-type="bibr" rid="B73">Liao et al. (2020)</xref> confirmed the impact of multisensory (visual and auditory) stimuli on emotions. <xref ref-type="bibr" rid="B53">Jacucci (2017)</xref> studied how multimodal synthesis (such as haptic and facial expression stimuli) can recognize and influence participants&#x2019; emotions. Previous studies have explored the effective use of VR as an emotion induction mechanism (<xref ref-type="bibr" rid="B15">Bosman et al., 2024</xref>; <xref ref-type="bibr" rid="B35">Dey et al., 2022</xref>; <xref ref-type="bibr" rid="B103">Somarathna et al., 2023</xref>); however, few studies have focused on emotional changes during VR waiting times. Waiting in VR is often more unpleasant for gamers because they are awkwardly confined by the headset (<xref ref-type="bibr" rid="B133">Zwiezen, 2020</xref>). As such, users cannot divert their attention to escape the loading screen, potentially triggering negative emotional experiences (<xref ref-type="bibr" rid="B50">Heidrich et al., 2020</xref>).</p>
<p>Particularly in large VR games, waiting times often exceed users&#x2019; comfortable threshold of 10&#xa0;s. In these games, it can take more than a minute for a player to enter. This prolonged waiting creates cognitive friction, where users face unexpected outcomes from seemingly intuitive interfaces or functions. The mismatch between expected and actual results leads to frustration (<xref ref-type="bibr" rid="B37">Ericson, 2022</xref>), often triggering negative emotions during these long waiting experiences. <xref ref-type="bibr" rid="B25">Cheng et al. (2024)</xref> found that interactions with features like doodles and emojis in VR can enhance positive emotions. This suggests that different interaction features can evoke varying emotional states. Based on the aforementioned literature, it seems different stimuli trigger different emotional values. Thus, this study proposes the following hypotheses:<list list-type="simple">
<list-item>
<p>H4a: In interactive interfaces, different visual stimulation intensities during waiting will lead to differences in positive emotions.</p>
</list-item>
<list-item>
<p>H4b: In non-interactive interfaces, different visual stimulation intensities during waiting will lead to differences in positive emotions.</p>
</list-item>
<list-item>
<p>H4c: In interactive interfaces, different visual stimulation intensities during waiting will lead to differences in negative emotions.</p>
</list-item>
<list-item>
<p>H4d: In non-interactive interfaces, different visual stimulation intensities during waiting will lead to differences in negative emotions.</p>
</list-item>
</list>
</p>
</sec>
<sec id="s2-5">
<title>2.5 Visual stimulation intensity and cognitive load when waiting</title>
<p>Cognitive load refers to the limited capacity of human cognitive resources, which can be seen as working memory. Executing tasks occupies working memory. A well-designed system allows users to focus on the task and spend less mental effort on irrelevant aspects (<xref ref-type="bibr" rid="B108">Sweller, 1988</xref>). <xref ref-type="bibr" rid="B59">Kleygrewe et al. (2024)</xref> pointed out that users&#x2019; VR experience and gaming frequency influence their cognitive resource consumption in VR. Compared to typical HCI scenarios, VR covers the user&#x2019;s visual field, creating unique effects on human vision. Compared to the experience of using a 2D graphic website, VR exposes users to more sensory stimuli, requiring them to allocate more attention and cognitive resources to the game (<xref ref-type="bibr" rid="B39">Fisher et al., 2018</xref>). This affects the size of the cognitive load experienced by users.</p>
<p>There are still research gaps regarding cognitive load in the HCI field. As more systems compete for users&#x2019; attention, a better understanding of cognitive workload becomes critical in HCI research (<xref ref-type="bibr" rid="B60">Kosch et al., 2023</xref>). As the processing of visual stimuli requires a certain amount of cognitive resources, different types and complexities of visual messages lead to varying levels of cognitive load (<xref ref-type="bibr" rid="B112">Valtchanov and Ellard, 2015</xref>). Cognitive load has often been used as a metric to explore the level of impact of visual stimulation on users. For example, engaging visual details in VR environments may cause additional cognitive load (<xref ref-type="bibr" rid="B30">Cruz et al., 2023</xref>). When cognitive load is increased, users often experience symptoms similar to visual fatigue (<xref ref-type="bibr" rid="B43">Gowrisankaran et al., 2012</xref>). When green elements are present in a VR environment, users experience lower cognitive load and more efficient information searching, improving overall performance (<xref ref-type="bibr" rid="B67">Latini et al., 2024</xref>). <xref ref-type="bibr" rid="B28">Chiossi et al. (2022)</xref> also found that reducing visual complexity can improve cognitive load. Additionally, previous research has shown that users facing higher cognitive load during waiting perceive time as passing more quickly (<xref ref-type="bibr" rid="B23">Chen and Li, 2022</xref>; <xref ref-type="bibr" rid="B140">Lallemand and Gronier, 2012</xref>). The literature thus confirms that visual resources such as visual stimulation intensity and spatial vision utilize working memory. Interactive environments may divert users&#x2019; attention, affecting cognitive load (<xref ref-type="bibr" rid="B98">Shelton et al., 2021</xref>). Therefore, improving visual presentation efficiency and reducing cognitive resource usage are crucial from a design perspective. 
This study thus uses cognitive load as an evaluation index to explore the impact of visual stimulation on users. Based on the above theories, this study proposes the following hypotheses:<list list-type="simple">
<list-item>
<p>H5a: In interactive loading interfaces, different visual stimulation intensities will lead to differences in cognitive load.</p>
</list-item>
<list-item>
<p>H5b: In non-interactive loading interfaces, different visual stimulation intensities will lead to differences in cognitive load.</p>
</list-item>
</list>
</p>
<p>Methods for measuring cognitive load are generally classified into physiological indicators (<xref ref-type="bibr" rid="B44">Gupta et al., 2019</xref>; <xref ref-type="bibr" rid="B68">Lee, 2014</xref>; <xref ref-type="bibr" rid="B104">Souchet et al., 2022</xref>) and self-report assessments (<xref ref-type="bibr" rid="B98">Shelton et al., 2021</xref>). Physiological measurements capture cognitive load through eye-tracking, heart rate variability (HRV), electroencephalography (EEG), respiration, and electrodermal activity (EDA), providing objective insights into cognitive states (<xref ref-type="bibr" rid="B87">Paas et al., 2003</xref>; <xref ref-type="bibr" rid="B119">Whelan, 2007</xref>).</p>
<p>Self-report assessments primarily rely on questionnaires, the most well-known being the NASA Task Load Index (NASA-TLX) developed by <xref ref-type="bibr" rid="B48">Hart and Staveland (1988)</xref>. Originally designed to evaluate pilots&#x2019; mental and physical workload, this questionnaire includes six dimensions: mental demand, physical demand, temporal demand, performance, effort, and frustration (<xref ref-type="bibr" rid="B48">Hart and Staveland, 1988</xref>). In VR cognitive load research, NASA-TLX has been widely used for subjective workload measurement (<xref ref-type="bibr" rid="B9">Bi et al., 2024</xref>; <xref ref-type="bibr" rid="B20">Che et al., 2025</xref>; <xref ref-type="bibr" rid="B27">Chiossi et al., 2025</xref>; <xref ref-type="bibr" rid="B34">De Witte et al., 2024</xref>; <xref ref-type="bibr" rid="B49">Hartfill et al., 2024</xref>; <xref ref-type="bibr" rid="B58">Kim et al., 2025</xref>; <xref ref-type="bibr" rid="B114">van Weelden et al., 2024</xref>; <xref ref-type="bibr" rid="B117">Vorwerg-Gall et al., 2023</xref>). Derived versions include the Raw Task Load Index (RTLX) (<xref ref-type="bibr" rid="B47">Hart, 2006</xref>), which has been broadly validated (<xref ref-type="bibr" rid="B76">Lovasz-Bukvova et al., 2021</xref>; <xref ref-type="bibr" rid="B96">Rodr&#xed;guez-Fern&#xe1;ndez et al., 2024</xref>), and the Simulation Task Load Index (SIM-TLX), which incorporates task complexity and situational stress to better align with VR cognitive load measurement (<xref ref-type="bibr" rid="B46">Harris et al., 2020</xref>; <xref ref-type="bibr" rid="B111">Urbano et al., 2024</xref>).</p>
<p>Some scholars have raised concerns about the applicability of self-report questionnaires. <xref ref-type="bibr" rid="B18">Buchner et al. (2025)</xref> argued that contextual constraints limit self-report measures, whereas physiological measurements enable non-intrusive cognitive state assessment (<xref ref-type="bibr" rid="B45">Halbig and Latoschik, 2021</xref>). <xref ref-type="bibr" rid="B40">Foy and Chapman (2018)</xref> similarly highlighted that self-reports suffer from subjectivity, lack of real-time synchronization, and an inability to simultaneously capture subjective ratings during simulation-based training. Additionally, <xref ref-type="bibr" rid="B60">Kosch et al. (2023)</xref> suggested that the widespread use of NASA-TLX in HCI applications may be more a result of historical precedent and convenience rather than because it is the optimal measurement tool. They argued that the questionnaire is prone to individual biases and lacks tailored design and empirical validation for HCI environments. In contrast, physiological signal measurements provide a more comprehensive approach to capturing real-time feedback (<xref ref-type="bibr" rid="B125">Zagermann et al., 2016</xref>). Furthermore, physiological indicators of cognitive load represent genuine physiological responses to real-world stimuli, aligning with the SOR theory framework adopted in this study (<xref ref-type="bibr" rid="B106">Suh and Prophet, 2018</xref>).</p>
<p>This study thus used the HP Reverb G2 Omnicept, a VR headset with physiological signal measurement capabilities. Cognitive load was measured on a scale from 0 to 1 through a machine-learning model built on data from 738 subjects, as detailed in HP&#x2019;s white paper (<xref ref-type="bibr" rid="B29">Company, 2021</xref>). According to reports from the HP research team, classification accuracy reaches 79.08% (<xref ref-type="bibr" rid="B99">Siegel et al., 2021</xref>). In recent years, many VR studies have utilized this headset for cognitive load measurement (<xref ref-type="bibr" rid="B1">Ahmadi et al., 2023</xref>; <xref ref-type="bibr" rid="B65">Lataifeh et al., 2024</xref>; <xref ref-type="bibr" rid="B93">Reddy et al., 2022</xref>). <xref ref-type="bibr" rid="B93">Reddy et al. (2022)</xref> validated the feasibility of using non-invasive sensors, such as the HP Reverb G2 Omnicept, for estimating cognitive load in VR. Their study also identified pupil diameter variation and fixation count as reliable indicators for assessing task complexity and cognitive load.</p>
</sec>
</sec>
<sec id="s3">
<title>3 Pilot study: effects of interactive VR loading interface on time perception and emotions</title>
<sec id="s3-1">
<title>3.1 Research methods</title>
<p>The pilot study involved semi-structured interviews with five game development experts to design the experimental system. Subsequently, researchers recruited 20 participants to explore the differences in users&#x2019; time estimates and emotions between interactive and non-interactive loading interfaces under varying waiting times.</p>
<sec id="s3-1-1">
<title>3.1.1 Expert interviews</title>
<p>The experimental design was based on the real-world needs and purposes of game development. The research team consulted with five industry experts with over 5&#xa0;years of experience, including game developers, senior game planners, independent game producers, game studio managers, and game programmers. The interviews were conducted one-on-one between July and August 2022, each lasting about an hour. Researchers explained the purpose of the study and the interview outline to the experts, ensuring the protection of their privacy. The discussions focused on the impact of interface interactivity on players&#x2019; time perceptions, cognitive load, and emotional changes. Researchers monitored the conversations to keep them on track and asked follow-up questions based on the experts&#x2019; ideas. The interview focused on two main topics: (1) the effects of stimuli on time perception and (2) interactivity and time perception. Fixed questions included the following: What factors (e.g., visuals, sound, animations) influenced your perception of time during the waiting process? In what scenarios did you feel that the waiting time was longer? Did interactive environments make time feel faster or slower during the waiting process? How would you describe your experience? In addition to these predefined questions, the researchers conducted follow-up inquiries based on participants&#x2019; behaviors and feedback to gain deeper insights.</p>
</sec>
<sec id="s3-1-2">
<title>3.1.2 Experimental design</title>
<p>After the expert interviews, researchers developed the experimental system using the Unity 2018.4.36f1 game engine and SteamVR for VR content development. Researchers programmed the system using the C&#x23; language, utilizing open-source resources or those freely available within the Unity and SteamVR development environments. A VR archery game served as the main experiment. <xref ref-type="fig" rid="F3">Figure 3</xref> presents a schematic illustration of the experimental interfaces.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Experimental design of loading interfaces. <bold>(a)</bold> Non-interactive loading interface, <bold>(b)</bold> Interactive loading interface, <bold>(c)</bold> The main game of experiment.</p>
</caption>
<alt-text>Three screenshots from the pilot study show two types of loading interfaces and the main VR gameplay. The first is a non-interactive loading screen, the second is an interactive loading interface, and the third shows the main VR game scene.</alt-text>
<graphic xlink:href="frvir-06-1540406-g003.tif"/>
</fig>
<p>The Research Ethics Committee approved the two-phase study design (pilot study and formal study), questionnaire survey, and informed consent form. Participants were fully informed about the research process and objectives and were asked to sign an informed consent form to confirm their understanding and agreement. In this phase, 20 participants were recruited, including 6 males and 14 females, aged between 20 and 29. All participants had essential gaming experience and normal or corrected vision with no diagnosed visual impairments or eye diseases.</p>
<p>Experts generally agreed that interactive loading interfaces should be simple and intuitive to reduce users&#x2019; time perception and enhance the user experience. For example, while incorporating games during waiting times can alleviate impatience, the complexity of game interactions should match the waiting duration to avoid increasing cognitive load and interfering with game flow. Interactions should remain lightweight to maintain user immersion. Experts also noted that overly-complex interactive elements could increase cognitive load and negatively impact the gaming experience. They pointed out that excessive visual feedback, complicated user interface designs, too many flashes, and voice prompts could lead to higher cognitive load.</p>
</sec>
<sec id="s3-1-3">
<title>3.1.3 Experimental procedure</title>
<p>Based on the experts&#x2019; perspectives, the independent variables for the experimental stimuli were the type of loading interface (interactive or non-interactive) and the length of waiting time (15&#xa0;s, 30&#xa0;s, and 60&#xa0;s). The dependent variables were waiting emotions and perceptions of waiting time. The 20 participants were tested using a within-subjects design, with the Latin Square Design randomly determining the order of the experimental stimuli. After the participants listened to the experiment instructions, researchers gave each participant a VR headset to experience the game loading interfaces in different sequences based on their assigned order. After each waiting period, participants played the archery game. This process was repeated multiple times.</p>
<p>For dependent variable measurement, subjective time perception was assessed using a closed-ended questionnaire based on the work of <xref ref-type="bibr" rid="B97">Seawright and Sampson (2007)</xref>, where participants responded by selecting predefined options. Emotional responses were measured using a subjective emotion questionnaire compiled in Chinese by <xref ref-type="bibr" rid="B55">Jou et al. (2006)</xref>, adapted initially from <xref ref-type="bibr" rid="B70">Levine et al. (1994)</xref>. The scale includes 14 emotion-check words, evenly divided into seven positive emotions (joyful, happy, pleased, contented, delighted, proud, fine) and seven negative emotions (sad, depressed, blue, gloomy, sorrowful, displeased, downhearted).</p>
<p>Responses were recorded using a 7-point Likert scale. The scoring method involved separately averaging each participant&#x2019;s ratings for positive and negative words, with negative scores inverted (multiplied by &#x2212;1) before averaging. The resulting scores were labeled &#x201c;positive emotion score&#x201d; and &#x201c;negative emotion score,&#x201d; where higher values reflect more substantial positive or negative emotional experiences, respectively.</p>
</sec>
</sec>
<sec id="s3-2">
<title>3.2 Validation</title>
<p>For both types of interfaces, researchers set the waiting times at 15&#xa0;s, 30&#xa0;s, and 60&#xa0;s. The experimental data for this phase are shown in <xref ref-type="table" rid="T2">Table 2</xref>. As the waiting time increased, players&#x2019; perceptions of waiting time also increased. However, time perceptions for the interactive interface were lower than those for the non-interactive interface. Therefore, H1a was supported. Additionally, different loading interfaces significantly affected time perceptions at 15 and 60&#xa0;s (p &#x3c; 0.05). Therefore, H2a was supported. That is, the longer the waiting time, the more significant the difference in time perceptions for interactive interfaces compared to non-interactive interfaces.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Comparison of waiting time perceptions.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Time passed</th>
<th align="left">Interface type</th>
<th align="left">Mean</th>
<th align="left">SD</th>
<th align="left">
<italic>z</italic>
</th>
<th align="left">
<italic>p</italic>
</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td rowspan="2" align="center">15.0&#xa0;s</td>
<td align="center">Non-interactive</td>
<td align="center">41.000</td>
<td align="center">34.3588</td>
<td rowspan="2" align="center">2.56</td>
<td rowspan="2" align="center">0.011</td>
</tr>
<tr>
<td align="center">Interactive</td>
<td align="center">26.750</td>
<td align="center">26.1712</td>
</tr>
<tr>
<td rowspan="2" align="center">30.0&#xa0;s</td>
<td align="center">Non-interactive</td>
<td align="center">55.500</td>
<td align="center">33.3995</td>
<td rowspan="2" align="center">1.26</td>
<td rowspan="2" align="center">0.209</td>
</tr>
<tr>
<td align="center">Interactive</td>
<td align="center">46.500</td>
<td align="center">26.3629</td>
</tr>
<tr>
<td rowspan="2" align="center">60.0&#xa0;s</td>
<td align="center">Non-interactive</td>
<td align="center">90.000</td>
<td align="center">40.5229</td>
<td rowspan="2" align="center">2.79</td>
<td rowspan="2" align="center">0.005</td>
</tr>
<tr>
<td align="center">Interactive</td>
<td align="center">61.750</td>
<td align="center">42.8054</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The author then explored the impact of different loading times (15&#xa0;s, 30&#xa0;s, and 60&#xa0;s) and interface interactivity (non-interactive vs. interactive) on the subjects&#x2019; emotions, the results of which are shown in <xref ref-type="table" rid="T3">Table 3</xref>. For 15&#xa0;s of waiting time, the difference in interface type triggered positive emotions. The statistical analysis shows that the <italic>Z</italic> value was &#x2212;2.153 with a <italic>p</italic>-value of 0.0313, while negative emotions showed a <italic>Z</italic> value of &#x2212;3.209 with a <italic>p</italic>-value of 0.0013. For 30&#xa0;s of waiting time, the <italic>Z</italic> value for positive emotions was &#x2212;3.416 with a <italic>p</italic>-value of 0.0006 and that of negative emotions was &#x2212;3.258 with a <italic>p</italic>-value of 0.0011. Finally, for 60&#xa0;s of waiting time, the <italic>Z</italic>-value for positive emotions was &#x2212;3.727 with a <italic>p</italic>-value of 0.0002 and that of negative emotions was &#x2212;3.658 with a <italic>p</italic>-value of 0.0003.</p>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>User emotions for varying waiting times and interface types.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th colspan="3" align="center">Waiting time</th>
<th align="center">Positive emotions</th>
<th align="center">
<italic>z</italic>
</th>
<th align="center">
<italic>p</italic>
</th>
<th align="center">Negative emotions</th>
<th align="center">
<italic>z</italic>
</th>
<th align="center">
<italic>p</italic>
</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td rowspan="4" align="center">15&#xa0;s</td>
<td rowspan="2" align="center">Non-interactive</td>
<td align="center">Mean</td>
<td align="center">2.3571</td>
<td rowspan="4" align="center">&#x2212;2.153</td>
<td rowspan="4" align="center">0.0313</td>
<td align="center">2.3</td>
<td rowspan="4" align="center">&#x2212;3.209</td>
<td rowspan="4" align="center">0.0013</td>
</tr>
<tr>
<td align="center">SD</td>
<td align="center">1.10414</td>
<td align="center">1.18973</td>
</tr>
<tr>
<td rowspan="2" align="center">Interactive</td>
<td align="center">Mean</td>
<td align="center">3.2286</td>
<td align="center">1.3857</td>
</tr>
<tr>
<td align="center">SD</td>
<td align="center">1.01164</td>
<td align="center">0.5427</td>
</tr>
<tr>
<td rowspan="4" align="center">30&#xa0;s</td>
<td rowspan="2" align="center">Non-interactive</td>
<td align="center">Mean</td>
<td align="center">2.3429</td>
<td rowspan="4" align="center">&#x2212;3.416</td>
<td rowspan="4" align="center">0.0006</td>
<td align="center">2.6143</td>
<td rowspan="4" align="center">&#x2212;3.258</td>
<td rowspan="4" align="center">0.0011</td>
</tr>
<tr>
<td align="center">SD</td>
<td align="center">0.99126</td>
<td align="center">1.25065</td>
</tr>
<tr>
<td rowspan="2" align="center">Interactive</td>
<td align="center">Mean</td>
<td align="center">3.7786</td>
<td align="center">1.2929</td>
</tr>
<tr>
<td align="center">SD</td>
<td align="center">0.90288</td>
<td align="center">0.35744</td>
</tr>
<tr>
<td rowspan="4" align="center">60&#xa0;s</td>
<td rowspan="2" align="center">Non-interactive</td>
<td align="center">Mean</td>
<td align="center">1.8214</td>
<td rowspan="4" align="center">&#x2212;3.727</td>
<td rowspan="4" align="center">0.0002</td>
<td align="center">3.0643</td>
<td rowspan="4" align="center">&#x2212;3.658</td>
<td rowspan="4" align="center">0.0003</td>
</tr>
<tr>
<td align="center">SD</td>
<td align="center">0.88777</td>
<td align="center">1.20994</td>
</tr>
<tr>
<td rowspan="2" align="center">Interactive</td>
<td align="center">Mean</td>
<td align="center">3.6643</td>
<td align="center">1.35</td>
</tr>
<tr>
<td align="center">SD</td>
<td align="center">0.78831</td>
<td align="center">0.5233</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>These results support H1b, which states that the interactive interface will engender more positive emotions and less negative emotions than the non-interactive interface. In addition, these results also support H2b, which states that the longer the waiting time, the greater the difference in emotion for the interactive interface compared to the non-interactive interface.</p>
</sec>
<sec id="s3-3">
<title>3.3 Conclusions</title>
<p>The main objective of the pilot study was to investigate the relationship between time perceptions and emotions for different waiting times and interactive vs. non-interactive loading interfaces. The results of the study revealed the following: (1) an interactive loading interface can shorten perceptions of waiting time; (2) an interactive loading interface is highly effective at shortening perceptions of waiting time for long waiting periods (60&#xa0;s), and the interactive interface generally elicited shorter perceptions of waiting time than did the non-interactive interface; and (3) an increase in waiting time leads to an increase in perceptions of waiting time, and the longer the waiting time, the more positive emotions decrease and the more negative emotions increase. In addition, using an interactive interface is more likely to maintain positive emotions and reduce negative emotions than using a non-interactive interface.</p>
<p>Through observation of the participants, this study found that after waiting for a certain period, the participants started to pay attention to external factors such as the environment and objects in the loading interface. Some respondents thought that the difference between interactive and non-interactive interfaces was insignificant in terms of time predictions. Post-experiment interviews suggested that this might be related to users&#x2019; familiarity with the long waiting times of VR games. This study also found that the level of players&#x2019; engagement in the previous game may have affected their waiting experience in the next game.</p>
</sec>
</sec>
<sec id="s4">
<title>4 Formal study: effects of visual stimulation intensity on time perceptions, cognitive load, and emotions</title>
<sec id="s4-1">
<title>4.1 Research methods</title>
<p>The formal study incorporated additional visual stimulation elements to further explore their effects on time perceptions, emotions, and cognitive load.</p>
<sec id="s4-1-1">
<title>4.1.1 Experimental design</title>
<p>In the formal research phase, we included more elements of visual stimulation that can affect time perceptions. VR users need to exert more attention and cognitive resources than players of traditional games (<xref ref-type="bibr" rid="B39">Fisher et al., 2018</xref>). Thus, in order to further understand the effects of visual stimuli on VR waiting experiences, this study categorized the following experimental stimuli based on relevant studies on the psychology of time: interactivity, visual stimulation (<xref ref-type="bibr" rid="B54">Jording et al., 2022</xref>), and details of the background environment (<xref ref-type="bibr" rid="B82">Mostajeran et al., 2023</xref>). As shown in <xref ref-type="fig" rid="F4">Figure 4</xref>, we superimposed the different elements in four groups for each type of interface: environmental details (None) x visual stimulation (None), environmental details (None) x visual stimulation (Yes), environmental details (Yes) x visual stimulation (None), and environmental details (Yes) x visual stimulation (Yes).</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Interfaces with varying visual stimulation intensities: (top) non-interactive loading interface, (bottom) interactive loading interface.</p>
</caption>
<alt-text>Eight VR screenshots show experimental stimuli designs. The top row displays non-interactive loading interfaces; the bottom row shows interactive ones. Each row includes four variations combining presence or absence of background environmental detail and visual particle effects.</alt-text>
<graphic xlink:href="frvir-06-1540406-g004.tif"/>
</fig>
</sec>
<sec id="s4-1-2">
<title>4.1.2 Instrument and participants</title>
<p>The study collected data on dependent variables through questionnaires and physiological signals. The questionnaire survey included assessments of both time perception (<xref ref-type="bibr" rid="B97">Seawright and Sampson, 2007</xref>) and emotions (<xref ref-type="bibr" rid="B55">Jou et al., 2006</xref>), using the same two questionnaires as those employed in the pilot study. Cognitive load was measured using physiological signals collected by the HP Reverb G2 Omnicept headset, with values ranging from 0 to 1. We analyzed trends in dependent variables using mean analysis and tested for statistical significance with a repeated measure one-way ANOVA.</p>
<p>Before testing the hypothesis, we utilized G&#x2a; Power 3.1 to estimate the minimum sample size (<xref ref-type="bibr" rid="B38">Faul et al., 2009</xref>). The software was set with the following parameters: number of groups &#x3d; 1, number of measurements &#x3d; 8, effect size (Cohen&#x2019;s f) &#x3d; 0.25, significance level (<italic>&#x3b1;</italic>) &#x3d; 0.05, statistical power (1-&#x3b2;) &#x3d; 0.95, and nonsphericity correction (&#x3b5;) &#x3d; 0.75. The test type selected in G&#x2a; Power was F test (ANOVA: Repeated measures, within factors; default correlation among repeated measures &#x3d; 0.5), as our study design involved within-subject comparisons. The results indicated that the recommended sample size was n &#x3d; 28. A recent VR study on time perception by <xref ref-type="bibr" rid="B86">Niknam et al. (2024)</xref> also used similar parameters for sample size estimation. In the formal stage of the study, 38 subjects were recruited, aged between 20 and 34&#xa0;years. After 4 subjects who did not complete the experiment were removed from the sample, 34 valid samples (14 male and 20 female participants) remained. All participants had experience playing video games. They gave written informed consent and were equipped with the HP Reverb G2 Omnicept Edition VR headset before the experiment. <xref ref-type="fig" rid="F5">Figure 5</xref> presents images of the experimental process. <xref ref-type="fig" rid="F6">Figures 6</xref>, <xref ref-type="fig" rid="F7">7</xref> detail the visuals observed by participants within the VR headset and the experimental procedure they experienced.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Images of experimental procedure.</p>
</caption>
<alt-text>Participants wearing HP Reverb G2 Omnicept VR headsets and holding controllers are engaged in a loading interface experiment inside a laboratory setting.</alt-text>
<graphic xlink:href="frvir-06-1540406-g005.tif"/>
</fig>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>Concept map of experimental process experienced by subjects. <bold>(a)</bold> Main game level 1, <bold>(b)</bold> Different waiting experimental stimulants, <bold>(c)</bold> Time perception and emotion questionnaire.</p>
</caption>
<alt-text>The image illustrates the experimental procedure each participant experienced. VR screenshots display the main game scene, eight loading interface stimuli, and the questionnaire used to assess emotion and time perception.</alt-text>
<graphic xlink:href="frvir-06-1540406-g006.tif"/>
</fig>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>Diagram of experimental process.</p>
</caption>
<alt-text>Process diagram of the experimental flow from participant instruction and consent to headset calibration, gameplay, exposure to loading interfaces, and completion of questionnaires measuring emotion, time perception, and cognitive load.</alt-text>
<graphic xlink:href="frvir-06-1540406-g007.tif"/>
</fig>
</sec>
<sec id="s4-1-3">
<title>4.1.3 Experimental procedure</title>
<p>Before the experiment, researchers explained the game operation to the subjects, who signed informed consent forms and were equipped with the HP Reverb G2 Omnicept Edition VR headset. The experiment used a within-subjects design with a Latin Square Design to control the sequence of stimulation. Researchers explained the game rules before the experiment to ensure subjects understood them. All subjects experienced Level 1 of the main game (30&#xa0;s) and a waiting page (60&#xa0;s, sequence determined by Latin Square Design), as shown in <xref ref-type="fig" rid="F6">Figures 6</xref>, <xref ref-type="fig" rid="F7">7</xref>. After completing all experiments and questionnaires while wearing the VR headset to avoid interruptions, subjects participated in open-ended interviews. Each session lasted approximately 1&#xa0;h per subject.</p>
</sec>
</sec>
<sec id="s4-2">
<title>4.2 Results of visual stimulation intensity on time perceptions and cognitive load</title>
<p>In this stage, the authors analyzed the influence of stimuli of different intensities on the interactive and non-interactive loading interfaces by averaging the trends of the dependent variables. The results presented in <xref ref-type="table" rid="T4">Table 4</xref> show that the lowest cognitive load was found for environmental details (None) x visual stimulation (None) in the non-interactive interface (0.521), and the lowest cognitive load was found for environmental details (None) x visual stimulation (Yes) and environmental details (Yes) x visual stimulation (None) in the interactive interface (0.54).</p>
<table-wrap id="T4" position="float">
<label>TABLE 4</label>
<caption>
<p>Statistical analysis of effects of interface interactivity and visual stimulation on time perceptions and cognitive load.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th rowspan="3" align="center">Stimulus NO.</th>
<th rowspan="3" align="center">Visual stimulation</th>
<th colspan="4" align="center">Non-interactive interface</th>
<th rowspan="3" align="center">Stimulus NO.</th>
<th colspan="4" align="center">Interactive interface</th>
</tr>
<tr>
<th colspan="2" align="center">Time perception</th>
<th colspan="2" align="center">Cognitive load</th>
<th colspan="2" align="center">Time perception</th>
<th colspan="2" align="center">Cognitive load</th>
</tr>
<tr>
<th align="center">Mean</th>
<th align="center">SD</th>
<th align="center">Mean</th>
<th align="center">SD</th>
<th align="center">Mean</th>
<th align="center">SD</th>
<th align="center">Mean</th>
<th align="center">SD</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">1</td>
<td align="left">ED (N) x<break/>VS (N)</td>
<td align="center">78.530</td>
<td align="center">38.544</td>
<td align="center">0.521</td>
<td align="center">0.075</td>
<td align="center">2</td>
<td align="center">71.910</td>
<td align="center">34.924</td>
<td align="center">0.550</td>
<td align="center">0.075</td>
</tr>
<tr>
<td align="center">3</td>
<td align="left">ED (N) x<break/>VS (Y)</td>
<td align="center">73.090</td>
<td align="center">38.198</td>
<td align="center">0.533</td>
<td align="center">0.059</td>
<td align="center">4</td>
<td align="center">69.410</td>
<td align="center">30.963</td>
<td align="center">0.540</td>
<td align="center">0.059</td>
</tr>
<tr>
<td align="center">5</td>
<td align="left">ED (Y) x<break/>VS (N)</td>
<td align="center">71.320</td>
<td align="center">34.186</td>
<td align="center">0.554</td>
<td align="center">0.089</td>
<td align="center">6</td>
<td align="center">70.000</td>
<td align="center">31.599</td>
<td align="center">0.540</td>
<td align="center">0.089</td>
</tr>
<tr>
<td align="center">7</td>
<td align="left">ED (Y) x<break/>VS (Y)</td>
<td align="center">76.760</td>
<td align="center">39.464</td>
<td align="center">0.564</td>
<td align="center">0.061</td>
<td align="center">8</td>
<td align="center">66.320</td>
<td align="center">33.355</td>
<td align="center">0.558</td>
<td align="center">0.061</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The effects of different visual stimulation intensities on time perceptions (<italic>F</italic> (3,31) &#x3d; 0.447, <italic>p</italic> &#x3d; 0.721) and cognitive load (<italic>F</italic> (3,31) &#x3d; 0.503, <italic>p</italic> &#x3d; 0.683) were not significantly different for the interactive interface. Therefore, H3a and H5a were not supported. In the case of the non-interactive interface, visual stimulation intensity was not significantly different for time perceptions (<italic>F</italic> (3,31) &#x3d; 0.902, <italic>p</italic> &#x3d; 0.451); thus, H3b was not supported. However, cognitive load reached a significant difference (<italic>F</italic> (3,31) &#x3d; 2.892, <italic>p</italic> &#x3d; 0.039), thereby supporting H5b.</p>
</sec>
<sec id="s4-3">
<title>4.3 Results of visual stimulation intensity on emotions</title>
<p>The emotion questionnaire consisted of 14 questions on positive and negative emotions. The questionnaire used a 7-point Likert scale, and reliability was confirmed with a Cronbach&#x2019;s &#x3b1; coefficient of 0.758. The results show that different visual stimulation factors affected positive and negative emotions for both the interactive and non-interactive interfaces. The means of positive and negative emotions were respectively 2.85 and 2.25 in the absence of visual stimulation without environmental details; however, when both visual stimulation and environmental details were present, positive emotions increased slightly to 3.08, and negative emotions decreased slightly to 2.17, which implies that visual stimulation intensity may affect participants&#x2019; emotions. Detailed data are shown in <xref ref-type="table" rid="T5">Table 5</xref>. In the case of the interactive interface, visual stimulation intensity did not reach a significant difference for positive emotions (<italic>F</italic> (3,31) &#x3d; 1.15, <italic>p</italic> &#x3d; 0.328), which means H4a was not supported. Negative emotions (<italic>F</italic> (3,31) &#x3d; 3.109, <italic>p</italic> &#x3d; 0.039) reached a significant difference, providing support for H4c. For the non-interactive interface, visual stimulation intensity was not significant for both positive (<italic>F</italic> (3,31) &#x3d; 1.425, <italic>p</italic> &#x3d; 0.24) and negative (<italic>F</italic> (3,31) &#x3d; 1.655, <italic>p</italic> &#x3d; 0.182) emotions; thus, H4b and H4d were not supported.</p>
<table-wrap id="T5" position="float">
<label>TABLE 5</label>
<caption>
<p>Analysis of effects of interface interactivity and visual stimulation on emotions.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th rowspan="3" align="center">Stimulus NO.</th>
<th rowspan="3" align="center">Visual stimulation intensity</th>
<th colspan="4" align="center">Non-interactive interface</th>
<th rowspan="3" align="center">Stimulus NO.</th>
<th colspan="4" align="center">Interactive interface</th>
</tr>
<tr>
<th colspan="2" align="center">Positive emotions</th>
<th colspan="2" align="center">Negative emotions</th>
<th colspan="2" align="center">Positive emotions</th>
<th colspan="2" align="center">Negative emotions</th>
</tr>
<tr>
<th align="center">Mean</th>
<th align="center">SD</th>
<th align="center">Mean</th>
<th align="center">SD</th>
<th align="center">Mean</th>
<th align="center">SD</th>
<th align="center">Mean</th>
<th align="center">SD</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">1</td>
<td align="center">ED (N) x<break/>VS (N)</td>
<td align="center">2.85</td>
<td align="center">1.59</td>
<td align="center">2.25</td>
<td align="left"/>
<td align="center">2</td>
<td align="center">3.36</td>
<td align="center">1.74</td>
<td align="center">1.60</td>
<td align="center">0.90</td>
</tr>
<tr>
<td align="center">3</td>
<td align="center">ED (N) x<break/>VS (Y)</td>
<td align="center">2.82</td>
<td align="center">1.64</td>
<td align="center">2.51</td>
<td align="center">1.66</td>
<td align="center">4</td>
<td align="center">3.19</td>
<td align="center">1.49</td>
<td align="center">2.15</td>
<td align="center">1.34</td>
</tr>
<tr>
<td align="center">5</td>
<td align="center">ED (Y) x<break/>VS (N)</td>
<td align="center">3.33</td>
<td align="center">1.81</td>
<td align="center">2.01</td>
<td align="center">1.27</td>
<td align="center">6</td>
<td align="center">3.54</td>
<td align="center">1.58</td>
<td align="center">1.75</td>
<td align="center">1.10</td>
</tr>
<tr>
<td align="center">7</td>
<td align="center">ED (Y) x<break/>VS (Y)</td>
<td align="center">3.08</td>
<td align="center">1.80</td>
<td align="center">2.17</td>
<td align="center">1.42</td>
<td align="center">8</td>
<td align="center">3.34</td>
<td align="center">1.70</td>
<td align="center">1.89</td>
<td align="center">1.19</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s5">
<title>5 Discussion and conclusion</title>
<sec id="s5-1">
<title>5.1 Discussion</title>
<p>Based on SOR and time perception theories, this study explores how sensory stimulation in VR game loading interfaces affects individuals&#x2019; internal states (time perceptions and emotions) and responses (cognitive load). Previous studies on loading interfaces often used simulations. They thus lacked comprehensive research on waiting scenarios in real VR games, typically focusing on 360-degree image interactions or optimizing algorithms to enhance viewing smoothness (<xref ref-type="bibr" rid="B135">Bendre et al., 2024</xref>; <xref ref-type="bibr" rid="B149">Sun et al., 2018</xref>; <xref ref-type="bibr" rid="B148">Zeynali et al., 2024</xref>). This study fills the research gap on loading interfaces in VR environments.</p>
<p>As <xref ref-type="bibr" rid="B106">Suh and Prophet (2018)</xref> and <xref ref-type="bibr" rid="B139">Kourouthanassis et al. (2015)</xref> have suggested, sensory stimulation is crucial for enhancing user experiences in immersive environments. Visual stimulation in immersive technologies evokes cognitive and emotional states, leading to behavioral changes. This study considers the effects of visual stimulation and interactivity, combining the characteristics of VR and interactive waiting experiences in the gaming industry. This study expands the research field of loading interfaces by exploring the impact of interactivity and visual stimulation during waiting times.</p>
<sec id="s5-1-1">
<title>5.1.1 Impact of interactivity on time perceptions and emotions</title>
<p>This study found that increasing scene details and interactive elements effectively stimulates players&#x2019; desire to explore, reduces time perception, triggers positive emotional effects, and lessens negative emotions such as impatience (as found by <xref ref-type="bibr" rid="B136">Bhaskaran et al. (2022)</xref>). However, the length of waiting times must be considered in the design of interactive loading interfaces. When waiting times are short, the impact on time perceptions might be negligible. However, adding interactive elements can significantly reduce players&#x2019; negative feelings when waiting times are longer (e.g., 60&#xa0;s).</p>
<p>In current VR game loading interfaces, many games have waiting times of up to 1&#xa0;minute. Many of these present only black screens for these waiting periods (e.g., Iron Man in VR and Batman Arkham VR). Some games, such as Sniper Elite VR and ARK Park&#x2019;s PSVR, allow players to look around in a 3D environment that does not contain interactive objects. This rotatable viewpoint is similar to the non-interactive interface of our experimental system. Recently, a number of games have introduced more interactive designs. For example, Smalland VR (2023) features particle effects and text prompts during loading, and Bulletstorm VR (2023) includes gameplay videos on the loading screen. These examples highlight the gaming industry&#x2019;s emphasis on improving player experience during waiting periods and demonstrate the practical contributions of this study to VR user experience.</p>
</sec>
<sec id="s5-1-2">
<title>5.1.2 Impact of visual stimulation intensity</title>
<p>As <xref ref-type="bibr" rid="B74">Liu (2017)</xref> pointed out, visual stimulation in immersive virtual environments affects human perception and behavior in distinctly different ways. This study found that in non-interactive interfaces, increased levels of visual stimulation reduced participants&#x2019; time perceptions and increased their cognitive load. However, this effect was more stable in interactive interfaces. Previous research suggests that managing cognitive load is crucial when dealing with multiple messages and emotion regulation (<xref ref-type="bibr" rid="B64">Lang, 2020</xref>; Lang et al., 2007). Additionally, research has shown that adjusting visual complexity can help users to stay more focused during VR tasks (<xref ref-type="bibr" rid="B28">Chiossi et al., 2022</xref>). As a result, in VR interactive interfaces, the effects of visual stimulation on time perceptions and cognitive load may be less pronounced after a certain threshold has been reached, as players&#x2019; attentional resources are focused on the interactive elements.</p>
<p>Even though the experimental results of the present study did not reach statistical significance in terms of the effect of visual stimulation on time perceptions between the non-interactive and interactive interfaces, we still observed changes in time perceptions as visual stimulation increased. Other possible factors may have had a more significant effect on time perceptions, making the effect of visual stimulation smaller. This phenomenon may be due to the weak contrast between the visual stimulation and non-interactive interfaces as well as the possible anchoring effect of the fixed loading interface in the experimental design (<xref ref-type="bibr" rid="B143">Kahneman et al., 1982</xref>), which makes the data susceptible to the effect of repeated testing as well as the effect of the expected time.</p>
<p>Visual stimuli in the interactive interface were found to significantly affect negative emotions. When players interacted with the interface, the effect of other visual stimuli on their negative emotions was amplified. Although the impact of visual stimulation on positive emotions did not reach statistical significance in both non-interactive and interactive interfaces, the average values of positive emotions still showed that visual variety is an effective tool for eliciting pleasure, as found in a study by <xref ref-type="bibr" rid="B8">Batistatou et al. (2022)</xref>.</p>
<p>Notably, because participants experienced both interactive and non-interactive interfaces, we observed and gathered from interviews that, compared to their experiences with interactive interfaces, participants felt more disappointed when they discovered that visually detailed scenes lacked interactive elements. This finding can be explained by motivation theory, which posits that intrinsic motivation arises from curiosity and interest in activities perceived as satisfying and stimulating (<xref ref-type="bibr" rid="B137">Deci and Ryan, 1985</xref>). The interactive loading interface designed in this study may enhance or activate players&#x2019; intrinsic motivation, triggering their desire for further interaction with the visual stimuli.</p>
<p>It is worth noting that the choice of visual stimuli should be adjusted to the specific game environment and individual player differences. Some respondents &#x201c;<italic>preferred scenes with details</italic>&#x201d; because the details made them curious and encouraged a sense of exploration and anticipation. The design of visual particles, especially fast-moving particles (<xref ref-type="bibr" rid="B54">Jording et al., 2022</xref>; <xref ref-type="bibr" rid="B144">Novotny and Laidlaw, 2024</xref>), may generate positive or negative emotions according to personal preferences. Nevertheless, our results showed that interactivity reliably reduced time perceptions and mitigated negative emotions across player preferences.</p>
</sec>
</sec>
<sec id="s5-2">
<title>5.2 Conclusion</title>
<p>This study applied SOR theory to validate the feasibility of designing interactive loading interfaces for VR games and the effectiveness of incorporating psychological stimuli related to waiting into game-loading interfaces. The results confirmed that such effects directly result from the stimuli on the individual&#x2019;s internal and external states. By adding interactive elements and environmental details during waiting times, this research significantly improved players&#x2019; waiting experiences and mitigated the adverse effects caused by prolonged waiting periods. Moreover, physiological data measurements indirectly showed that the responses triggered during VR game waits might align with similar paradigms found in past research.</p>
<p>Compared to the non-interactive loading interface, the interactive loading interface resulted in shorter time perceptions, increased positive emotions, and decreased negative emotions. As waiting times increased, the interactivity of the interface increased the variance in time perceptions and emotional responses. In the interactive interface, players&#x2019; attention was focused on the interactive elements; therefore, after a certain threshold, the effect of visual stimuli on time perceptions and cognitive load decreased. For the non-interactive interface, increases in visual stimulation were associated with increases in cognitive load.</p>
<p>Visual stimulation elicited both positive and negative emotions. Players&#x2019; interactions also seemed to amplify the effects of visual stimuli on negative emotions. Interactions may also satisfy players&#x2019; intrinsic motivation and trigger expectations for further interactions with other visual stimuli; thus, attention should be paid to players&#x2019; expectations when designing interfaces with visual stimulation.</p>
</sec>
<sec id="s5-3">
<title>5.3 Limitations and suggestions for future study</title>
<p>First, this study focused on the direct impact of stimuli on internal and external variables within SOR theory instead of treating individual internal responses as mediating variables. Future research should consider the effects of time perceptions and emotions on cognitive load. Additionally, to enable the participants to master the game quickly, the game levels in this study were designed to be less challenging. This may have affected players&#x2019; intrinsic motivation. Further research is necessary to examine the consistency of the findings across different game genres. In terms of experimental design, we used repeated testing. However, the long experimental time may have triggered the anchoring and practice effects of repeated experimental testing (<xref ref-type="bibr" rid="B142">Liu et al., 2024</xref>). Experimental times and randomization should be carefully considered in future studies.</p>
<p>Additionally, this study did not measure a baseline for the dependent variables before the experiment, which may affect the interpretation of results. Participants&#x2019; initial states (e.g., emotion and cognitive load) could influence their perception of time, as psychological factors such as anxiety or focus level may impact time perception independently of the VR environment manipulation. Future studies should consider incorporating baseline measurements&#x2014;such as static waiting tasks or time estimation in a quiet environment&#x2014;before the experiment to ensure that subsequent changes in measurement can be attributed to the VR interface manipulation rather than individual differences.</p>
<p>Furthermore, future research could adopt a more comprehensive approach in terms of cognitive load measurement. While some scholars question the reliability of subjective measures (<xref ref-type="bibr" rid="B18">Buchner et al., 2025</xref>; <xref ref-type="bibr" rid="B60">Kosch et al., 2023</xref>), many studies suggest combining subjective and objective assessments to provide a more holistic analysis (<xref ref-type="bibr" rid="B5">Ayres et al., 2021</xref>). For instance, existing research widely employs NASA-TLX alongside physiological measurements, including electrodermal activity (EDA) (<xref ref-type="bibr" rid="B3">Armougum et al., 2019</xref>; <xref ref-type="bibr" rid="B28">Chiossi et al., 2022</xref>; <xref ref-type="bibr" rid="B58">Kim et al., 2025</xref>), heart rate variability (HRV) (<xref ref-type="bibr" rid="B20">Che et al., 2025</xref>; <xref ref-type="bibr" rid="B93">Reddy et al., 2022</xref>), electroencephalography (EEG) (<xref ref-type="bibr" rid="B27">Chiossi et al., 2025</xref>; <xref ref-type="bibr" rid="B114">van Weelden et al., 2024</xref>), and electrocardiography (ECG) (<xref ref-type="bibr" rid="B118">Wei&#xdf; and Pfeiffer, 2024</xref>). Future research should integrate baseline measurements with multimodal cognitive load assessment to enhance data reliability and interpretability. Furthermore, the selected measurement methods may have failed to capture subtle differences in time perceptions due to visual stimulation; future research could include more refined measurement methods or more sensitive experimental designs to detect the effects of visual stimulation on time perceptions.</p>
<p>Because individual player preferences may have affected players&#x2019; responses to the stimuli presented on the loading interface, we suggest that the population of subjects be further categorized to increase the accuracy of the findings. Numerous VR studies have focused on the interaction effects of multimodal sensory experiences on user experience (<xref ref-type="bibr" rid="B36">Dzardanova et al., 2024</xref>; <xref ref-type="bibr" rid="B53">Jacucci, 2017</xref>; <xref ref-type="bibr" rid="B124">Yuan et al., 2023</xref>). Future experiments could consider incorporating auditory (<xref ref-type="bibr" rid="B15">Bosman et al., 2024</xref>; <xref ref-type="bibr" rid="B90">Picard et al., 2023</xref>) and tactile interactions as stimuli. Finally, since the stimuli in waiting scenarios are influenced by individual player experiences, we suggest that future studies could further segment the participant groups to derive more accurate conclusions.</p>
</sec>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The data is not available for public access due to the following restrictions: the dataset includes sensitive information that could compromise the privacy of research participants. Data access is therefore restricted to the research team and is subject to strict ethical standards to ensure data protection and confidentiality. Requests to access the datasets should be directed to Tzu-Hsuan Wang, <email>winnie9874@gmail.com</email>.</p>
</sec>
<sec sec-type="ethics-statement" id="s7">
<title>Ethics statement</title>
<p>The studies involving humans were approved by Research Ethics Committee of National Taiwan University. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study. Written informed consent was obtained from the individual(s) for the publication of any potentially identifiable images or data included in this article.</p>
</sec>
<sec sec-type="author-contributions" id="s8">
<title>Author contributions</title>
<p>Y-TH: Conceptualization, Funding acquisition, Methodology, Supervision, Writing &#x2013; review and editing. C-CH: Conceptualization, Data curation, Formal Analysis, Investigation, Writing &#x2013; original draft. T-HW: Project administration, Validation, Visualization, Writing &#x2013; review and editing.</p>
</sec>
<sec sec-type="funding-information" id="s9">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research and/or publication of this article. This research was made possible by the Taiwan MOE Teaching Practice Research Programme, Project number MOE-113-TPRHA-0025-006Y1.</p>
</sec>
<ack>
<p>This research was funded by the Taiwan MOE Teaching Practice Research Programme.</p>
</ack>
<sec sec-type="COI-statement" id="s10">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s11">
<title>Generative AI statement</title>
<p>The author(s) declare that no Generative AI was used in the creation of this manuscript.</p>
</sec>
<sec sec-type="disclaimer" id="s12">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Ahmadi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Michalka</surname>
<given-names>S. W.</given-names>
</name>
<name>
<surname>Lenzoni</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Najafabadi</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Bai</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Sumich</surname>
<given-names>A.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). &#x201c;<article-title>Cognitive load measurement with physiological sensors in virtual reality during physical activity</article-title>,&#x201d; in <source>Proceedings of the 29th ACM symposium on virtual reality software and technology</source> (<publisher-loc>Christchurch, New Zealand</publisher-loc>). <pub-id pub-id-type="doi">10.1145/3611659.3615704</pub-id>
</citation>
</ref>
<ref id="B2">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Alhilal</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Tsui</surname>
<given-names>Y. H.</given-names>
</name>
<name>
<surname>Hui</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2024</year>). &#x201c;<article-title>FovOptix: human vision-compatible video encoding and adaptive streaming in VR cloud gaming</article-title>,&#x201d; in <source>MMSys 2024 - proceedings of the 2024 ACM multimedia systems conference</source>.</citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Armougum</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Orriols</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Gaston-Bellegarde</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Marle</surname>
<given-names>C. J.-L.</given-names>
</name>
<name>
<surname>Piolino</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Virtual reality: a new method to investigate cognitive load during navigation</article-title>. <source>J. Environ. Psychol.</source> <volume>65</volume>, <fpage>101338</fpage>. <pub-id pub-id-type="doi">10.1016/j.jenvp.2019.101338</pub-id>
</citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Asish</surname>
<given-names>S. M.</given-names>
</name>
<name>
<surname>Kulshreshth</surname>
<given-names>A. K.</given-names>
</name>
<name>
<surname>Borst</surname>
<given-names>C. W.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Detecting distracted students in educational VR environments using machine learning on eye gaze data</article-title>. <source>Comput. Graph. (Pergamon)</source> <volume>109</volume>, <fpage>75</fpage>&#x2013;<lpage>87</lpage>. <pub-id pub-id-type="doi">10.1016/j.cag.2022.10.007</pub-id>
</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ayres</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>J. Y.</given-names>
</name>
<name>
<surname>Paas</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>van Merri&#xeb;nboer</surname>
<given-names>J. J. G.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>The validity of physiological measures to identify differences in intrinsic cognitive load</article-title>. <source>Front. Psychol.</source> <volume>12</volume>, <fpage>702538</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2021.702538</pub-id>
</citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ba&#xf1;os</surname>
<given-names>R. M.</given-names>
</name>
<name>
<surname>Botella</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Alca&#xf1;iz</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Lia&#xf1;o</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Guerrero</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Rey</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>Immersion and emotion: their impact on the sense of presence</article-title>. <source>Cyberpsychology and Behav.</source> <volume>7</volume> (<issue>6</issue>), <fpage>734</fpage>&#x2013;<lpage>741</lpage>. <pub-id pub-id-type="doi">10.1089/cpb.2004.7.734</pub-id>
</citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bartyzel</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Igras-Cybulska</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Hekiert</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Majdak</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>&#x141;ukawski</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Bohn&#xe9;</surname>
<given-names>T.</given-names>
</name>
<etal/>
</person-group> (<year>2025</year>). <article-title>Exploring user reception of speech-controlled virtual reality environment for voice and public speaking training</article-title>. <source>Comput. and Graph.</source>, <volume>126</volume>, <fpage>104160</fpage>. <pub-id pub-id-type="doi">10.1016/j.cag.2024.104160</pub-id>
</citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Batistatou</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Vandeville</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Delevoye-Turrell</surname>
<given-names>Y. N.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Virtual reality to evaluate the impact of colorful interventions and nature elements on spontaneous walking, gaze, and emotion</article-title>. <source>Front. Virtual Real.</source> <volume>3</volume>. <pub-id pub-id-type="doi">10.3389/frvir.2022.819597</pub-id>
</citation>
</ref>
<ref id="B135">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bendre</surname>
<given-names>P. A.</given-names>
</name>
<name>
<surname>Kumar</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>A.</surname>
<given-names>F.A.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Enhancing 360-degree video streaming via selective inpainting for bandwidth optimization</article-title>. <source>2024 IEEE 21st Consumer Communications &#x26; Networking Conference (CCNC)</source>.</citation>
</ref>
<ref id="B136">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bhaskaran</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Marappan</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Veeramanickam</surname>
<given-names>M. R. M.</given-names>
</name>
<name>
<surname>Bharathiraja</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Hariharan</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Pradeepa</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Sentiment Analysis Model using Text and Emoticons for Pharmaceutical &#x26; Healthcare Industries</article-title>. <source>Proceedings - 2022 2nd International Conference on Innovative Sustainable Computational Technologies, CISCT 2022</source>.</citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bi</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Chi</surname>
<given-names>Y.-C.</given-names>
</name>
<name>
<surname>Lin</surname>
<given-names>Y.-Y.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>M.-J.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>B.-Y.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Blow your mind: exploring the effects of scene-switching and visualization of time constraints on brainstorming in virtual reality</article-title>. <source>Proc. ACM Hum.-Comput. Interact.</source> <volume>8</volume> (<issue>CSCW2</issue>), <fpage>1</fpage>&#x2013;<lpage>23</lpage>. <pub-id pub-id-type="doi">10.1145/3687026</pub-id>
</citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Birt</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Vasilevski</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Comparison of single and multiuser immersive mobile virtual reality usability in construction education</article-title>. <source>Educ. Technol. Soc.</source> <volume>24</volume> (<issue>2</issue>), <fpage>93</fpage>&#x2013;<lpage>106</lpage>. <comment>Available online at: <ext-link ext-link-type="uri" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="https://www.jstor.org/stable/27004934">https://www.jstor.org/stable/27004934</ext-link>
</comment>
</citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bitner</surname>
<given-names>M. J.</given-names>
</name>
</person-group> (<year>1992</year>). <article-title>Servicescapes: the impact of physical surroundings on customers and employees</article-title>. <source>J. Mark.</source> <volume>56</volume> (<issue>2</issue>), <fpage>57</fpage>&#x2013;<lpage>71</lpage>. <pub-id pub-id-type="doi">10.1177/002224299205600205</pub-id>
</citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Block</surname>
<given-names>R. A.</given-names>
</name>
<name>
<surname>Gruber</surname>
<given-names>R. P.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Time perception, attention, and memory: a selective review</article-title>. <source>Acta Psychol.</source> <volume>149</volume>, <fpage>129</fpage>&#x2013;<lpage>133</lpage>. <pub-id pub-id-type="doi">10.1016/j.actpsy.2013.11.003</pub-id>
</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Block</surname>
<given-names>R. A.</given-names>
</name>
<name>
<surname>Zakay</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>1996</year>). <article-title>Models of psychological time revisited</article-title>. <source>Time and mind</source> Editor H. Helfrich (Kirkland, WA: Hogrefe &#x26; Huber), <fpage>171</fpage>&#x2013;<lpage>195</lpage>.</citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Block</surname>
<given-names>R. A.</given-names>
</name>
<name>
<surname>Zakay</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>1997</year>). <article-title>Prospective and retrospective duration judgments: a meta-analytic review</article-title>. <source>Psychonomic Bull. and Rev.</source> <volume>4</volume> (<issue>2</issue>), <fpage>184</fpage>&#x2013;<lpage>197</lpage>. <pub-id pub-id-type="doi">10.3758/BF03209393</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bosman</surname>
<given-names>I. D. V.</given-names>
</name>
<name>
<surname>Buruk</surname>
<given-names>O. O.</given-names>
</name>
<name>
<surname>J&#xf8;rgensen</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Hamari</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>The effect of audio on the experience in virtual reality: a scoping review</article-title>. <source>Behav. Inf. Technol.</source> <volume>43</volume> (<issue>1</issue>), <fpage>165</fpage>&#x2013;<lpage>199</lpage>. <pub-id pub-id-type="doi">10.1080/0144929X.2022.2158371</pub-id>
</citation>
</ref>
<ref id="B16">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Britten</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>Google doodles and collective memory-making</article-title>,&#x201d; in <source>Handbook of visual communication: theory, methods, and media</source>, <fpage>334</fpage>&#x2013;<lpage>348</lpage>. <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.scopus.com/inward/record.uri?eid=2-s2.0-85105366196&#x26;partnerID=40&#x26;md5=dca95764c70bf47a31b04cff3b7c3808">https://www.scopus.com/inward/record.uri?eid&#x3d;2-s2.0-85105366196&#x26;partnerID&#x3d;40&#x26;md5&#x3d;dca95764c70bf47a31b04cff3b7c3808</ext-link>.</comment>
</citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Brown</surname>
<given-names>S. W.</given-names>
</name>
</person-group> (<year>1995</year>). <article-title>Time, change, and motion: the effects of stimulus movement on temporal perception</article-title>. <source>Percept. and Psychophys.</source> <volume>57</volume> (<issue>1</issue>), <fpage>105</fpage>&#x2013;<lpage>116</lpage>. <pub-id pub-id-type="doi">10.3758/BF03211853</pub-id>
</citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Buchner</surname>
<given-names>S. L.</given-names>
</name>
<name>
<surname>Kintz</surname>
<given-names>J. R.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>J. Y.</given-names>
</name>
<name>
<surname>Banerjee</surname>
<given-names>N. T.</given-names>
</name>
<name>
<surname>Clark</surname>
<given-names>T. K.</given-names>
</name>
<name>
<surname>Hayman</surname>
<given-names>A. P.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Assessing physiological signal utility and sensor burden in estimating trust, situation awareness, and mental workload</article-title>. <source>J. Cognitive Eng. Decis. Mak.</source>, <fpage>15553434241310084</fpage>. <pub-id pub-id-type="doi">10.1177/15553434241310084</pub-id>
</citation>
</ref>
<ref id="B134">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cellan-Jones</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>The year when VR goes from virtual to reality</article-title>. <source>BBC News.</source> Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.bbc.com/news/technology-35205783">https://www.bbc.com/news/technology-35205783</ext-link>.</citation>
</ref>
<ref id="B19">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Chang</surname>
<given-names>H. L.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>H. C.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Tsaih</surname>
<given-names>R. H.</given-names>
</name>
<name>
<surname>Pu</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>How design features lead to visitors&#x27; visit intention through virtual reality experience: the case of national palace museum</article-title>,&#x201d; in <source>Proceedings of the 22nd pacific asia conference on information systems - opportunities and challenges for the digitized society: are we ready? PACIS 2018</source>.</citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Che</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Tang</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Lin</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Hui</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2025</year>). <article-title>Three-dimensional (3D) stimuli are always better than two-dimensional (2D) multi-tasking? A high cognitive load in 3D-MATB-II</article-title>. <source>Behav. Brain Res.</source>, <volume>477</volume>, <fpage>115322</fpage>. <pub-id pub-id-type="doi">10.1016/j.bbr.2024.115322</pub-id>
</citation>
</ref>
<ref id="B21">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>The development characteristics of virtual reality after the year of VR</article-title>,&#x201d; in <source>Proceedings - 2020 international conference on innovation design and digital technology, ICIDDT 2020</source>.</citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>C.-H.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>The effect of visual feedback types on the wait indicator interface of a mobile application</article-title>. <source>Displays</source> <volume>61</volume>, <fpage>101928</fpage>. <pub-id pub-id-type="doi">10.1016/j.displa.2019.101928</pub-id>
</citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>C. H.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>A study on the errors in time perception and waiting experiences of user interface design for mobile devices</article-title>. <source>J. Des.</source> <volume>27</volume> (<issue>4</issue>), <fpage>1</fpage>&#x2013;<lpage>19</lpage>. <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.scopus.com/inward/record.uri?eid=2-s2.0-85165996544&#x26;partnerID=40&#x26;md5=b4fbf539ce6653d5f84fccd454a1c0c5">https://www.scopus.com/inward/record.uri?eid&#x3d;2-s2.0-85165996544&#x26;partnerID&#x3d;40&#x26;md5&#x3d;b4fbf539ce6653d5f84fccd454a1c0c5</ext-link>.</comment>
</citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cheng</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ma</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Qian</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Pan</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>The effects of mobile applications&#x2019; passive and interactive loading screen types on waiting experience</article-title>. <source>Behav. and Inf. Technol.</source> <volume>43</volume>, <fpage>1652</fpage>&#x2013;<lpage>1663</lpage>. <pub-id pub-id-type="doi">10.1080/0144929X.2023.2224901</pub-id>
</citation>
</ref>
<ref id="B25">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Cheng</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Sheng</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Gao</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Dong</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Han</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2024</year>). &#x201c;<article-title>Enhancing positive emotions through interactive virtual reality experiences: an EEG-based investigation</article-title>,&#x201d; in <source>Proceedings - 2024 IEEE conference on virtual reality and 3D user interfaces, VR 2024</source>.</citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cheymol</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Fribourg</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Lecuyer</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Normand</surname>
<given-names>J. M.</given-names>
</name>
<name>
<surname>Argelaguet</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Beyond my real body: characterization, impacts, applications and perspectives of &#x27;dissimilar&#x27; avatars in virtual reality</article-title>. <source>IEEE Trans. Vis. Comput. Graph.</source> <volume>29</volume> (<issue>11</issue>), <fpage>4426</fpage>&#x2013;<lpage>4437</lpage>. <pub-id pub-id-type="doi">10.1109/TVCG.2023.3320209</pub-id>
</citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chiossi</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Ou</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Gerhardt</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Putze</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Mayer</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Designing and evaluating an adaptive virtual reality system using EEG frequencies to balance internal and external attention states</article-title>. <source>Int. J. Human-Computer Stud.</source>, <volume>196</volume>, <fpage>103433</fpage>. <pub-id pub-id-type="doi">10.1016/j.ijhcs.2024.103433</pub-id>
</citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chiossi</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Welsch</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Villa</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Chuang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Mayer</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Virtual reality adaptation using electrodermal activity to support the user experience</article-title>. <source>Big Data Cognitive Comput.</source> <volume>6</volume> (<issue>2</issue>), <fpage>55</fpage>. <comment>Article 55</comment>. <pub-id pub-id-type="doi">10.3390/bdcc6020055</pub-id>
</citation>
</ref>
<ref id="B29">
<citation citation-type="book">
<person-group person-group-type="author">
<collab>HP Development Company</collab>
</person-group> (<year>2021</year>). <source>HP Reverb</source>. <edition>G2 Omnicept Edition</edition>. <publisher-name>HP&#xae; Official Site</publisher-name>.</citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cruz</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Coluci</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Moraes</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>ORUN-VR2: a VR serious game on the projectile kinematics: design, evaluation, and learning outcomes</article-title>. <source>Virtual Real.</source>, <fpage>1</fpage>&#x2013;<lpage>22</lpage>. <pub-id pub-id-type="doi">10.1007/s10055-023-00824-w</pub-id>
</citation>
</ref>
<ref id="B31">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Csikszentmihalyi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Csikszentmihalyi</surname>
<given-names>I. S.</given-names>
</name>
</person-group> (<year>1992</year>). <source>Optimal experience: psychological studies of flow in consciousness</source>. <publisher-name>Cambridge, United Kingdom: Cambridge University Press</publisher-name>.</citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cui</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Tian</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Bai</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>D.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>The role of valence, arousal, stimulus type, and temporal paradigm in the effect of emotion on time perception: a meta-analysis</article-title>. <source>Psychonomic Bull. Rev.</source> <volume>30</volume> (<issue>1</issue>), <fpage>1</fpage>&#x2013;<lpage>21</lpage>. <pub-id pub-id-type="doi">10.3758/s13423-022-02148-3</pub-id>
</citation>
</ref>
<ref id="B137">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Deci</surname>
<given-names>E. L.</given-names>
</name>
<name>
<surname>Ryan</surname>
<given-names>R. M.</given-names>
</name>
</person-group> (<year>1985</year>). <source>Intrinsic Motivation and Self-Determination in Human Behavior</source>. <publisher-name>Berlin: Springer</publisher-name>. <pub-id pub-id-type="doi">10.1007/978-1-4899-2271-7</pub-id>
</citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>de Carvalho</surname>
<given-names>B. J. A.</given-names>
</name>
<name>
<surname>Soares</surname>
<given-names>M. M.</given-names>
</name>
<name>
<surname>das Neves</surname>
<given-names>A. M. M.</given-names>
</name>
<name>
<surname>Medeiros</surname>
<given-names>R. P.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Interactive doodles: a comparative analysis of the usability and playability of Google trademark games between 2010 and 2012. Design, user experience, and usability. Health</article-title>. <source>Learn. Play. Cult. Cross-Cultural User Exp</source>. <pub-id pub-id-type="doi">10.1007/978-3-642-39241-2_56</pub-id>
</citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>De Witte</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Reynaert</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Hutain</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Kieken</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Jabbour</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Possik</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Immersive learning of factual knowledge while assessing the influence of cognitive load and spatial abilities</article-title>. <source>Comput. and Educ. X Real.</source> <volume>5</volume>, <fpage>100085</fpage>. <pub-id pub-id-type="doi">10.1016/j.cexr.2024.100085</pub-id>
</citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dey</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Barde</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Yuan</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Sareen</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Dobbins</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Goh</surname>
<given-names>A.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Effects of interacting with facial expressions and controllers in different virtual environments on presence, usability, affect, and neurophysiological signals</article-title>. <source>Int. J. Hum. Comput. Stud.</source> <volume>160</volume>, <fpage>102762</fpage>. <pub-id pub-id-type="doi">10.1016/j.ijhcs.2021.102762</pub-id>
</citation>
</ref>
<ref id="B138">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dubovi</surname>
<given-names>I.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Cognitive and emotional engagement while learning with VR: The perspective of multimodal methodology</article-title>. <source>Comput. Educ.</source> <volume>183</volume>. <pub-id pub-id-type="doi">10.1016/j.compedu.2022.104495</pub-id>
</citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dzardanova</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Nikolakopoulou</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Kasapakis</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Vosinakis</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Xenakis</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Gavalas</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Exploring the impact of non-verbal cues on user experience in immersive virtual reality</article-title>. <source>Comput. Animat. Virtual Worlds</source> <volume>35</volume> (<issue>1</issue>), <fpage>e2224</fpage>. <pub-id pub-id-type="doi">10.1002/cav.2224</pub-id>
</citation>
</ref>
<ref id="B37">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ericson</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Reimagining the role of friction in experience design</article-title>. <source>J. User Exp.</source> <volume>17</volume> (<issue>4</issue>).</citation>
</ref>
<ref id="B38">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Faul</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Erdfelder</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Buchner</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Lang</surname>
<given-names>A.-G.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Statistical power analyses using G&#x2a;Power 3.1: tests for correlation and regression analyses</article-title>. <source>Behav. Res. methods</source> <volume>41</volume> (<issue>4</issue>), <fpage>1149</fpage>&#x2013;<lpage>1160</lpage>. <pub-id pub-id-type="doi">10.3758/brm.41.4.1149</pub-id>
</citation>
</ref>
<ref id="B39">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fisher</surname>
<given-names>J. T.</given-names>
</name>
<name>
<surname>Keene</surname>
<given-names>J. R.</given-names>
</name>
<name>
<surname>Huskey</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Weber</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>The limited capacity model of motivated mediated message processing: taking stock of the past</article-title>. <source>Ann. Int. Commun. Assoc.</source> <volume>42</volume> (<issue>4</issue>), <fpage>270</fpage>&#x2013;<lpage>290</lpage>. <pub-id pub-id-type="doi">10.1080/23808985.2018.1534552</pub-id>
</citation>
</ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Foy</surname>
<given-names>H. J.</given-names>
</name>
<name>
<surname>Chapman</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Mental workload is reflected in driver behaviour, physiology, eye movements and prefrontal cortex activation</article-title>. <source>Appl. Ergon.</source> <volume>73</volume>, <fpage>90</fpage>&#x2013;<lpage>99</lpage>. <pub-id pub-id-type="doi">10.1016/j.apergo.2018.06.006</pub-id>
</citation>
</ref>
<ref id="B41">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gable</surname>
<given-names>P. A.</given-names>
</name>
<name>
<surname>Wilhelm</surname>
<given-names>A. L.</given-names>
</name>
<name>
<surname>Poole</surname>
<given-names>B. D.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>How does emotion influence time perception? A review of evidence linking emotional motivation and time processing</article-title>. <source>Front. Psychol.</source> <volume>13</volume>, <fpage>848154</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2022.848154</pub-id>
</citation>
</ref>
<ref id="B141">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Garson</surname>
<given-names>O. T.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Relativity: A hot stove and a pretty girl</article-title>. <source>Quote Investigator.</source> Available online at: <ext-link ext-link-type="uri" xlink:href="https://quoteinvestigator.com/2014/11/24/hot-stove/">https://quoteinvestigator.com/2014/11/24/hot-stove/</ext-link>.</citation>
</ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Godefroit-Winkel</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Schill</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Diop-Sall</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Does environmental corporate social responsibility increase consumer loyalty?</article-title> <source>Int. J. Retail and Distribution Manag.</source> <volume>50</volume> (<issue>4</issue>), <fpage>417</fpage>&#x2013;<lpage>436</lpage>. <pub-id pub-id-type="doi">10.1108/ijrdm-08-2020-0292</pub-id>
</citation>
</ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gowrisankaran</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Nahar</surname>
<given-names>N. K.</given-names>
</name>
<name>
<surname>Hayes</surname>
<given-names>J. R.</given-names>
</name>
<name>
<surname>Sheedy</surname>
<given-names>J. E.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Asthenopia and blink rate under visual and cognitive loads</article-title>. <source>Optometry Vis. Sci.</source> <volume>89</volume> (<issue>1</issue>), <fpage>97</fpage>&#x2013;<lpage>104</lpage>. <pub-id pub-id-type="doi">10.1097/OPX.0b013e318236dd88</pub-id>
</citation>
</ref>
<ref id="B44">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Gupta</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Hajika</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Pai</surname>
<given-names>Y. S.</given-names>
</name>
<name>
<surname>Duenser</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Lochner</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Billinghurst</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>In AI we trust: investigating the relationship between biosignals, trust and cognitive load</article-title>,&#x201d; in <source>
<italic>VR</italic> proceedings of the 25th ACM symposium on virtual reality software and technology</source> (<publisher-loc>Parramatta, NSW, Australia</publisher-loc>). <pub-id pub-id-type="doi">10.1145/3359996.3364276</pub-id>
</citation>
</ref>
<ref id="B45">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Halbig</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Latoschik</surname>
<given-names>M. E.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>A systematic review of physiological measurements, factors, methods, and applications in virtual reality</article-title>. <source>Front. Virtual Real.</source> <volume>2</volume>. <pub-id pub-id-type="doi">10.3389/frvir.2021.694567</pub-id>
</citation>
</ref>
<ref id="B46">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Harris</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Wilson</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Vine</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Development and validation of a simulation workload measure: the simulation task load index (SIM-TLX)</article-title>. <source>Virtual Real.</source> <volume>24</volume> (<issue>4</issue>), <fpage>557</fpage>&#x2013;<lpage>566</lpage>. <pub-id pub-id-type="doi">10.1007/s10055-019-00422-9</pub-id>
</citation>
</ref>
<ref id="B47">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hart</surname>
<given-names>S. G.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>Nasa-task load index (NASA-TLX); 20 Years later</article-title>. <source>Proc. Hum. Factors Ergonomics Soc. Annu. Meet.</source> <volume>50</volume> (<issue>9</issue>), <fpage>904</fpage>&#x2013;<lpage>908</lpage>. <pub-id pub-id-type="doi">10.1177/154193120605000909</pub-id>
</citation>
</ref>
<ref id="B48">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Hart</surname>
<given-names>S. G.</given-names>
</name>
<name>
<surname>Staveland</surname>
<given-names>L. E.</given-names>
</name>
</person-group> (<year>1988</year>). <article-title>Development of NASA-TLX (task load index): results of empirical and theoretical research</article-title>. In <person-group person-group-type="editor">
<name>
<surname>Hancock</surname>
<given-names>P. A.</given-names>
</name>
<name>
<surname>Meshkati</surname>
<given-names>N.</given-names>
</name>
</person-group> (Eds.), <source>Advances in psychology</source> (Vol. <volume>52</volume>, pp. <fpage>139</fpage>&#x2013;<lpage>183</lpage>). <publisher-name>North-Holland</publisher-name>. <pub-id pub-id-type="doi">10.1016/S0166-4115(08)62386-9</pub-id>
</citation>
</ref>
<ref id="B49">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hartfill</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Bormann</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Riebandt</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>K&#xfc;hn</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Steinicke</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Objective agency measurement of different hand appearances in virtual reality with intentional binding</article-title>. <source>Virtual Real.</source> <volume>29</volume> (<issue>1</issue>), <fpage>14</fpage>. <pub-id pub-id-type="doi">10.1007/s10055-024-01085-x</pub-id>
</citation>
</ref>
<ref id="B50">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Heidrich</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Wohlan</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Schaller</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>Perceived speed, frustration and enjoyment of interactive and passive loading scenarios in virtual reality</article-title>,&#x201d; in <source>Lecture notes in computer science (including subseries lecture notes in artificial intelligence and lecture notes in bioinformatics)</source>.</citation>
</ref>
<ref id="B51">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Igarz&#xe1;bal</surname>
<given-names>F. A.</given-names>
</name>
<name>
<surname>Hruby</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Witowska</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Khoshnoud</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Wittmann</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>What happens while waiting in virtual reality? A comparison between a virtual and a real waiting situation concerning boredom, self-regulation, and the experience of time</article-title>. <source>Technol. Mind, Behav.</source> <volume>2</volume> (<issue>2</issue>). <pub-id pub-id-type="doi">10.1037/tmb0000038</pub-id>
</citation>
</ref>
<ref id="B52">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jacoby</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2002</year>). <article-title>Stimulus&#x2010;organism&#x2010;response reconsidered: an evolutionary step in modeling (consumer) behavior</article-title>. <source>J. consumer Psychol.</source> <volume>12</volume> (<issue>1</issue>), <fpage>51</fpage>&#x2013;<lpage>57</lpage>. <pub-id pub-id-type="doi">10.1207/s15327663jcp1201_05</pub-id>
</citation>
</ref>
<ref id="B53">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jacucci</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Toward affective social interaction in VR</article-title>. <source>Interactions</source> <volume>24</volume> (<issue>4</issue>), <fpage>6</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1145/3097462</pub-id>
</citation>
</ref>
<ref id="B54">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jording</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Vogel</surname>
<given-names>D. H. V.</given-names>
</name>
<name>
<surname>Viswanathan</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Vogeley</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Dissociating passage and duration of time experiences through the intensity of ongoing visual change</article-title>. <source>Sci. Rep.</source> <volume>12</volume> (<issue>1</issue>), <fpage>8226</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-022-12063-1</pub-id>
</citation>
</ref>
<ref id="B55">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jou</surname>
<given-names>J. Y. H.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>Y.-Y.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>H.-N.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>J.-M.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>The effect of affective states and the delays at different phases of a service delivery on the perceived waiting time</article-title>. <source>Sun Yat-Sen Manag. Rev.</source> (<issue>2</issue>), <fpage>487</fpage>. <pub-id pub-id-type="doi">10.6160/2006.06.07</pub-id>
</citation>
</ref>
<ref id="B143">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Kahneman</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Slovic</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Tversky</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>1982</year>). <source>Judgment under uncertainty: heuristics and biases</source>. <publisher-name>Cambridge, United Kingdom: Cambridge University Press</publisher-name>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://books.google.com.tw/books?id=_0H8gwj4a1MC">https://books.google.com.tw/books?id=_0H8gwj4a1MC</ext-link>.</citation>
</ref>
<ref id="B56">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kanai</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Paffen</surname>
<given-names>C. L. E.</given-names>
</name>
<name>
<surname>Hogendoorn</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Verstraten</surname>
<given-names>F. A. J.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>Time dilation in dynamic visual display</article-title>. <source>J. Vis.</source> <volume>6</volume> (<issue>12</issue>), <fpage>8</fpage>. <pub-id pub-id-type="doi">10.1167/6.12.8</pub-id>
</citation>
</ref>
<ref id="B57">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kim</surname>
<given-names>M. J.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>C.-K.</given-names>
</name>
<name>
<surname>Jung</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Exploring consumer behavior in virtual reality tourism using an extended stimulus-organism-response model</article-title>. <source>J. Travel Res.</source> <volume>59</volume> (<issue>1</issue>), <fpage>69</fpage>&#x2013;<lpage>89</lpage>. <pub-id pub-id-type="doi">10.1177/0047287518818915</pub-id>
</citation>
</ref>
<ref id="B58">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kim</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Lazaro</surname>
<given-names>M. J.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Development and validation of spatial disorientation scenarios using virtual reality and motion simulator</article-title>. <source>Appl. Ergon.</source> <volume>125</volume>, <fpage>104457</fpage>. <pub-id pub-id-type="doi">10.1016/j.apergo.2024.104457</pub-id>
</citation>
</ref>
<ref id="B59">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kleygrewe</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Hutter</surname>
<given-names>R. I. V.</given-names>
</name>
<name>
<surname>Koedijk</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Oudejans</surname>
<given-names>R. R. D.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Virtual reality training for police officers: a comparison of training responses in VR and real-life training</article-title>. <source>Police Pract. Res.</source> <volume>25</volume> (<issue>1</issue>), <fpage>18</fpage>&#x2013;<lpage>37</lpage>. <pub-id pub-id-type="doi">10.1080/15614263.2023.2176307</pub-id>
</citation>
</ref>
<ref id="B60">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kosch</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Karolus</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zagermann</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Reiterer</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Schmidt</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Wo&#x17A;niak</surname>
<given-names>P. W.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>A survey on measuring cognitive workload in human-computer interaction</article-title>. <source>ACM Comput. Surv.</source> <volume>55</volume> (<issue>13s</issue>), <fpage>1</fpage>&#x2013;<lpage>39</lpage>. <pub-id pub-id-type="doi">10.1145/3582272</pub-id>
</citation>
</ref>
<ref id="B139">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kourouthanassis</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Boletsis</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Bardaki</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Chasanidou</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Tourists responses to mobile augmented reality travel guides: The role of emotions on adoption behavior</article-title>. <source>Pervasive and Mobile Computing</source>. <volume>18</volume>, <fpage>71</fpage>&#x2013;<lpage>87</lpage>. <pub-id pub-id-type="doi">10.1016/j.pmcj.2014.08.009</pub-id>
</citation>
</ref>
<ref id="B61">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Kurusathianpong</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Tangmanee</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Comparison of perceived waiting time between two lengths of progress indicator and two styles of graphics animation with perceived uncertainty as a covariate</article-title>,&#x201d; in <source>2018 seventh ICT international student project conference (ICT-ISPC)</source>.</citation>
</ref>
<ref id="B140">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lallemand</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Gronier</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Enhancing user experience during waiting time in HCI: contributions of cognitive psychology</article-title>. <source>DIS&#x2019;2012</source>.</citation>
</ref>
<ref id="B62">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lamotte</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Izaute</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Droit-Volet</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Awareness of time distortions and its relation with time judgment: a metacognitive approach</article-title>. <source>Conscious. Cognition</source> <volume>21</volume>(<issue>2</issue>), <fpage>835</fpage>&#x2013;<lpage>842</lpage>. <pub-id pub-id-type="doi">10.1016/j.concog.2012.02.012</pub-id>
</citation>
</ref>
<ref id="B63">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Landeck</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Alvarez Igarz&#xe1;bal</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Unruh</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Habenicht</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Khoshnoud</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Wittmann</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>Journey through a virtual tunnel: simulated motion and its effects on the experience of time</article-title>. <source>Front. Virtual Real.</source> <volume>3</volume>. <pub-id pub-id-type="doi">10.3389/frvir.2022.1059971</pub-id>
</citation>
</ref>
<ref id="B64">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lang</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>An untapped opportunity to reduce the friction of using VR headsets</article-title>. <source>Road to VR</source>. <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.roadtovr.com/reducing-vr-friction-pre-headset-selection-loading/">https://www.roadtovr.com/reducing-vr-friction-pre-headset-selection-loading/</ext-link>. (Accessed on June 17, 2020).</comment>
</citation>
</ref>
<ref id="B65">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lataifeh</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Ahmed</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Elbardawil</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Gordani</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Assessing the legibility of Arabic road signage using eye gazing and cognitive loading metrics</article-title>. <source>Computers</source> <volume>13</volume> (<issue>5</issue>), <fpage>123</fpage>. <pub-id pub-id-type="doi">10.3390/computers13050123</pub-id>
</citation>
</ref>
<ref id="B66">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Latifi</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Blum</surname>
<given-names>S. C.</given-names>
</name>
<name>
<surname>Fowler</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Determinants of users&#x2019; intention to visit a destination: a virtual reality quality framework</article-title>. <source>J. Qual. Assur. Hosp. Tour.</source>, <fpage>1</fpage>&#x2013;<lpage>25</lpage>. <pub-id pub-id-type="doi">10.1080/1528008X.2024.2440010</pub-id>
</citation>
</ref>
<ref id="B67">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Latini</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Marcelli</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Di Giuseppe</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>D&#x27;Orazio</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Investigating the impact of greenery elements in office environments on cognitive performance, visual attention and distraction: an eye-tracking pilot-study in virtual reality</article-title>. <source>Appl. Ergon.</source> <volume>118</volume>, <fpage>104286</fpage>. <pub-id pub-id-type="doi">10.1016/j.apergo.2024.104286</pub-id>
</citation>
</ref>
<ref id="B68">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lee</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Measuring cognitive load with electroencephalography and self-report: focus on the effect of English-medium learning for Korean students</article-title>. <source>Educ. Psychol.</source> <volume>34</volume> (<issue>7</issue>), <fpage>838</fpage>&#x2013;<lpage>848</lpage>. <pub-id pub-id-type="doi">10.1080/01443410.2013.860217</pub-id>
</citation>
</ref>
<ref id="B69">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lee</surname>
<given-names>Y. G.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>A. N.</given-names>
</name>
<name>
<surname>Hess</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>The online waiting experience: using temporal information and distractors to make online waits feel shorter</article-title>. <source>J. Assoc. Inf. Syst.</source> <volume>18</volume> (<issue>3</issue>), <fpage>231</fpage>&#x2013;<lpage>263</lpage>. <pub-id pub-id-type="doi">10.17705/1jais.00452</pub-id>
</citation>
</ref>
<ref id="B70">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Levine</surname>
<given-names>S. R.</given-names>
</name>
<name>
<surname>Wyer Jr</surname>
<given-names>R. S.</given-names>
</name>
<name>
<surname>Schwarz</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>1994</year>). <article-title>Are you what you feel? The affective and cognitive determinants of self-judgments</article-title>. <source>Eur. J. Soc. Psychol.</source> <volume>24</volume> (<issue>1</issue>), <fpage>63</fpage>&#x2013;<lpage>77</lpage>. <pub-id pub-id-type="doi">10.1002/ejsp.2420240105</pub-id>
</citation>
</ref>
<ref id="B71">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Cai</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Shi</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>An improvement on the progress bar: make it a story, make it a game</article-title>. <source>Advances in Intelligent Systems and Computing</source>.</citation>
</ref>
<ref id="B72">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Liang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Xie</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2023</year>). &#x201c;<article-title>Performance analysis of improvemental LOD technology under VR headsets</article-title>,&#x201d; in <source>Third international conference on artificial intelligence, virtual reality, and visualization (AIVRV 2023)</source>.</citation>
</ref>
<ref id="B73">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Liao</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Xie</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Su</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Jiang</surname>
<given-names>F.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). &#x201c;<article-title>Data-driven spatio-temporal analysis via multi-modal zeitgebers and cognitive load in VR</article-title>,&#x201d; in <source>Proceedings - 2020 IEEE conference on virtual reality and 3D user interfaces, VR 2020</source>.</citation>
</ref>
<ref id="B74">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>HTC Vive participates in Shanghai MWC on a large scale! Brings many interesting VR applications such as Transformers 5 shooting game</article-title>. <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="https://lpcomment.com/2017/06/29/htc-mwcs-17-vive/files/2687/htc-mwcs-17-vive.html">https://lpcomment.com/2017/06/29/htc-mwcs-17-vive/files/2687/htc-mwcs-17-vive.html</ext-link>.</comment>
</citation>
</ref>
<ref id="B142">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>An attention-based approach for assessing the effectiveness of emotion-evoking in immersive environment [Article]</article-title>. <source>Heliyon</source> <volume>10</volume> (<issue>3</issue>), <fpage>e25017</fpage>. <pub-id pub-id-type="doi">10.1016/j.heliyon.2024.e25017</pub-id>
</citation>
</ref>
<ref id="B75">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Lofca</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Jerald</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Costa</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Kopper</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2023</year>). &#x201c;<article-title>Does adding physical realism to virtual reality training reduce time compression</article-title>,&#x201d; in <source>Proceedings - 2023 IEEE conference on virtual reality and 3D user interfaces abstracts and workshops, VRW 2023</source>.</citation>
</ref>
<ref id="B76">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Lovasz-Bukvova</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>H&#xf6;lzl</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Kormann-Hainzl</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Moser</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Zigart</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Schlund</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2021</year>). <source>Usability and task load of applications in augmented and virtual reality. Systems, software and services process improvement</source>. <publisher-loc>Cham</publisher-loc>.</citation>
</ref>
<ref id="B77">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lozano</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Modalities and aesthetics of waiting in video games</article-title>. <source>Artnodes</source> <volume>2022</volume> (<issue>30</issue>). <pub-id pub-id-type="doi">10.7238/artnodes.v0i30.398865</pub-id>
</citation>
</ref>
<ref id="B78">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mala Kalaiarasan</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Vafaei-Zadeh</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Hanifah</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Ramayah</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Can we engage players with extended reality in gaming applications? A Stimulus-Organism-Response framework</article-title>. <source>Entertain. Comput.</source> <volume>50</volume>, <fpage>100651</fpage>. <pub-id pub-id-type="doi">10.1016/j.entcom.2024.100651</pub-id>
</citation>
</ref>
<ref id="B79">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Matthews</surname>
<given-names>W. J.</given-names>
</name>
<name>
<surname>Meck</surname>
<given-names>W. H.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Temporal cognition: connecting subjective time to perception, attention, and memory</article-title>. <source>Psychol. Bull.</source> <volume>142</volume> (<issue>8</issue>), <fpage>865</fpage>&#x2013;<lpage>907</lpage>. <pub-id pub-id-type="doi">10.1037/bul0000045</pub-id>
</citation>
</ref>
<ref id="B80">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Mehrabian</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Russell</surname>
<given-names>J. A.</given-names>
</name>
</person-group> (<year>1974</year>). <source>An approach to environmental psychology</source>. <publisher-name>The MIT Press</publisher-name>.</citation>
</ref>
<ref id="B81">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Mondellini</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Colombo</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Arlati</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Lawson</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Cobb</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Human factors and ergonomics</article-title>. In <source>Roadmapping extended reality</source> (pp. <fpage>229</fpage>&#x2013;<lpage>256</lpage>). <pub-id pub-id-type="doi">10.1002/9781119865810.ch10</pub-id>
</citation>
</ref>
<ref id="B82">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mostajeran</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Fischer</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Steinicke</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>K&#xfc;hn</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Effects of exposure to immersive computer-generated virtual nature and control environments on affect and cognition</article-title>. <source>Sci. Rep.</source> <volume>13</volume> (<issue>1</issue>), <fpage>220</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-022-26750-6</pub-id>
</citation>
</ref>
<ref id="B83">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mullen</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Davidenko</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Time compression in virtual reality</article-title>. <source>Timing and Time Percept.</source> <volume>9</volume> (<issue>4</issue>), <fpage>377</fpage>&#x2013;<lpage>392</lpage>. <pub-id pub-id-type="doi">10.1163/22134468-bja10034</pub-id>
</citation>
</ref>
<ref id="B84">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Myers</surname>
<given-names>B. A.</given-names>
</name>
</person-group> (<year>1985</year>). <article-title>The importance of percent-done progress indicators for computer-human interfaces</article-title>. <source>ACM SIGCHI Bull.</source> <volume>16</volume> (<issue>4</issue>), <fpage>11</fpage>&#x2013;<lpage>17</lpage>. <pub-id pub-id-type="doi">10.1145/1165385.317459</pub-id>
</citation>
</ref>
<ref id="B85">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Nielsen</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>1994</year>). <source>Usability engineering</source>. <publisher-loc>San Francisco, CA</publisher-loc>: <publisher-name>Morgan Kaufmann</publisher-name>.</citation>
</ref>
<ref id="B86">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Niknam</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Picard</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Rondinelli</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Botev</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2024</year>). <source>Some times fly: the effects of engagement and environmental dynamics on time perception in virtual reality proceedings of the 30th ACM symposium on virtual reality software and technology</source>. <publisher-loc>Trier, Germany</publisher-loc>: <publisher-name>ACM</publisher-name>. <pub-id pub-id-type="doi">10.1145/3641825.3687726</pub-id>
</citation>
</ref>
<ref id="B144">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Novotny</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Laidlaw</surname>
<given-names>D. H.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Evaluating Text Reading Speed in VR Scenes and 3D Particle Visualizations</article-title>. <source>IEEE Transactions on Visualization and Computer Graphics</source> <volume>30</volume> (<issue>5</issue>), <fpage>2602</fpage>&#x2013;<lpage>2612</lpage>. <pub-id pub-id-type="doi">10.1109/TVCG.2024.3372093</pub-id>
</citation>
</ref>
<ref id="B87">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Paas</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Tuovinen</surname>
<given-names>J. E.</given-names>
</name>
<name>
<surname>Tabbers</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Van Gerven</surname>
<given-names>P. W. M.</given-names>
</name>
</person-group> (<year>2003</year>). <article-title>Cognitive load measurement as a means to advance cognitive load theory</article-title>. <source>Educ. Psychol.</source> <volume>38</volume> (<issue>1</issue>), <fpage>63</fpage>&#x2013;<lpage>71</lpage>. <pub-id pub-id-type="doi">10.1207/S15326985EP3801_8</pub-id>
</citation>
</ref>
<ref id="B88">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pedroli</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Greci</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Colombo</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Serino</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Cipresso</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Arlati</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). <article-title>Characteristics, usability, and users experience of a system combining cognitive and physical therapy in a virtual environment: positive bike</article-title>. <source>Sensors</source> <volume>18</volume> (<issue>7</issue>), <fpage>2343</fpage>. <pub-id pub-id-type="doi">10.3390/s18072343</pub-id>
</citation>
</ref>
<ref id="B89">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pibernik</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Doli&#x107;</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Mandi&#x107;</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Kova&#x10d;</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Mobile-application loading-animation design and implementation optimization</article-title>. <source>Appl. Sci. Switz.</source> <volume>13</volume> (<issue>2</issue>), <fpage>865</fpage>. <pub-id pub-id-type="doi">10.3390/app13020865</pub-id>
</citation>
</ref>
<ref id="B90">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Picard</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Botev</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2023</year>). &#x201c;<article-title>Rhythmic stimuli and time experience in virtual reality</article-title>,&#x201d; in <source>Lecture notes in computer science (including subseries lecture notes in artificial intelligence and lecture notes in bioinformatics)</source>.</citation>
</ref>
<ref id="B91">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Qu</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Che</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Ma</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Bio-physiological-signals-based VR cybersickness detection [Article]</article-title>. <source>CCF Trans. Pervasive Comput. Interact.</source> <volume>4</volume> (<issue>3</issue>), <fpage>268</fpage>&#x2013;<lpage>284</lpage>. <pub-id pub-id-type="doi">10.1007/s42486-022-00103-8</pub-id>
</citation>
</ref>
<ref id="B92">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Raees</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Ullah</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>THE-3DI: tracing head and eyes for 3D interactions: an interaction technique for virtual environments</article-title>. <source>Multimedia Tools Appl.</source> <volume>79</volume> (<issue>1-2</issue>), <fpage>1311</fpage>&#x2013;<lpage>1337</lpage>. <pub-id pub-id-type="doi">10.1007/s11042-019-08305-6</pub-id>
</citation>
</ref>
<ref id="B93">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Reddy</surname>
<given-names>G. S. R.</given-names>
</name>
<name>
<surname>Spencer</surname>
<given-names>C. A.</given-names>
</name>
<name>
<surname>Durkee</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Cox</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Fox Cotton</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Galbreath</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Estimating cognitive load and cybersickness of pilots in VR simulations via unobtrusive physiological sensors</article-title>. <fpage>251</fpage>&#x2013;<lpage>269</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-031-06015-1_18</pub-id>
</citation>
</ref>
<ref id="B94">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rhiu</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>Y. M.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Yun</surname>
<given-names>M. H.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>The evaluation of user experience of a human walking and a driving simulation in the virtual reality</article-title>. <source>Int. J. Industrial Ergonomics</source> <volume>79</volume>, <fpage>103002</fpage>. <pub-id pub-id-type="doi">10.1016/j.ergon.2020.103002</pub-id>
</citation>
</ref>
<ref id="B95">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Riva</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Mantovani</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Capideville</surname>
<given-names>C. S.</given-names>
</name>
<name>
<surname>Preziosa</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Morganti</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Villani</surname>
<given-names>D.</given-names>
</name>
<etal/>
</person-group> (<year>2007</year>). <article-title>Affective interactions using virtual reality: the link between presence and emotions</article-title>. <source>Cyberpsychology and Behav.</source> <volume>10</volume> (<issue>1</issue>), <fpage>45</fpage>&#x2013;<lpage>56</lpage>. <pub-id pub-id-type="doi">10.1089/cpb.2006.9993</pub-id>
</citation>
</ref>
<ref id="B96">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rodr&#xed;guez-Fern&#xe1;ndez</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>den Berg</surname>
<given-names>A. v.</given-names>
</name>
<name>
<surname>Cucinella</surname>
<given-names>S. L.</given-names>
</name>
<name>
<surname>Lobo-Prat</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Font-Llagunes</surname>
<given-names>J. M.</given-names>
</name>
<name>
<surname>Marchal-Crespo</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Immersive virtual reality for learning exoskeleton-like virtual walking: a feasibility study</article-title>. <source>J. NeuroEngineering Rehabilitation</source> <volume>21</volume> (<issue>1</issue>), <fpage>195</fpage>. <pub-id pub-id-type="doi">10.1186/s12984-024-01482-y</pub-id>
</citation>
</ref>
<ref id="B146">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sarkar</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Bansal</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Lea</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Bura</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Virtual reality: a simulated experience: a comprehensive view</article-title>, in <source>Artificial intelligence for virtual reality</source>. Editors J. Hemanth, M. Bhatia, and I. De La Torre Diez. <publisher-name>De Gruyter</publisher-name>, <fpage>1</fpage>&#x2013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1515/9783110713817-001</pub-id>
</citation>
</ref>
<ref id="B97">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Seawright</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Sampson</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>A video method for empirically studying wait-perception bias</article-title>. <source>J. Operations Manag.</source> <volume>25</volume>, <fpage>1055</fpage>&#x2013;<lpage>1066</lpage>. <pub-id pub-id-type="doi">10.1016/j.jom.2006.10.006</pub-id>
</citation>
</ref>
<ref id="B98">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shelton</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Nesbitt</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Thorpe</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Eidels</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Gauging the utility of ambient displays by measuring cognitive load</article-title>. <source>Cognition, Technol. Work</source> <volume>23</volume> (<issue>3</issue>), <fpage>459</fpage>&#x2013;<lpage>480</lpage>. <pub-id pub-id-type="doi">10.1007/s10111-020-00639-8</pub-id>
</citation>
</ref>
<ref id="B99">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Siegel</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Wei</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Gomes</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Oliviera</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Sundaramoorthy</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Smathers</surname>
<given-names>K.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>HP Omnicept cognitive load database (HPO-CLD)&#x2013;developing a multimodal inference engine for detecting real-time mental workload in VR</article-title>. <source>HP Labs</source>.</citation>
</ref>
<ref id="B100">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>&#x160;&#x137;ilters</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zari&#x146;a</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Luguzis</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Bali&#x146;a</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Umbra&#x161;ko</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Apse</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Color-emotion mappings and their demographic dependencies in digital environment: online test evidence using MS paint 7 color palette</article-title>. <source>Baltic J. Mod. Comput.</source> <volume>11</volume> (<issue>3</issue>), <fpage>354</fpage>&#x2013;<lpage>382</lpage>. <pub-id pub-id-type="doi">10.22364/bjmc.2023.11.3.01</pub-id>
</citation>
</ref>
<ref id="B101">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Skylark</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>How do changes in speed affect the perception of duration?</article-title> <source>J. Exp. Psychol. Hum. Percept. Perform.</source> <volume>37</volume>, <fpage>1617</fpage>&#x2013;<lpage>1627</lpage>. <pub-id pub-id-type="doi">10.1037/a0022193</pub-id>
</citation>
</ref>
<ref id="B147">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Smith</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>2016&#x2019;s five best virtual reality headsets</article-title>. <source>ZDNET</source>. <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.zdnet.com/article/2016s-five-best-virtual-reality-headsets/">https://www.zdnet.com/article/2016s-five-best-virtual-reality-headsets/</ext-link>.</comment>
</citation>
</ref>
<ref id="B102">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>S&#xf6;derstr&#xf6;m</surname>
<given-names>U.</given-names>
</name>
<name>
<surname>B&#xe5;&#xe5;th</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Mejtoft</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>The Users&#x27; Time Perception: the effect of various animation speeds on loading screens</article-title>,&#x201d; in <source>Proceedings of the 36th European conference on cognitive ergonomics</source>. <publisher-loc>Utrecht, Netherlands</publisher-loc>. <pub-id pub-id-type="doi">10.1145/3232078.3232092</pub-id>
</citation>
</ref>
<ref id="B103">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Somarathna</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Bednarz</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Mohammadi</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Virtual reality for emotion elicitation - a review</article-title>. <source>IEEE Trans. Affect. Comput.</source> <volume>14</volume> (<issue>4</issue>), <fpage>2626</fpage>&#x2013;<lpage>2645</lpage>. <pub-id pub-id-type="doi">10.1109/TAFFC.2022.3181053</pub-id>
</citation>
</ref>
<ref id="B104">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Souchet</surname>
<given-names>A. D.</given-names>
</name>
<name>
<surname>Philippe</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Lourdeaux</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Leroy</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Measuring visual fatigue and cognitive load via eye tracking while learning with virtual reality head-mounted displays: a review</article-title>. <source>Int. J. Human&#x2013;Computer Interact.</source> <volume>38</volume> (<issue>9</issue>), <fpage>801</fpage>&#x2013;<lpage>824</lpage>. <pub-id pub-id-type="doi">10.1080/10447318.2021.1976509</pub-id>
</citation>
</ref>
<ref id="B105">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Su</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Hsu</surname>
<given-names>M. K.</given-names>
</name>
<name>
<surname>Boostrom</surname>
<given-names>R. E.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>From recreation to responsibility: increasing environmentally responsible behavior in tourism</article-title>. <source>J. Bus. Res.</source> <volume>109</volume>, <fpage>557</fpage>&#x2013;<lpage>573</lpage>. <pub-id pub-id-type="doi">10.1016/j.jbusres.2018.12.055</pub-id>
</citation>
</ref>
<ref id="B106">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Suh</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Prophet</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>The state of immersive technology research: a literature analysis</article-title>. <source>Comput. Hum. Behav.</source> <volume>86</volume>, <fpage>77</fpage>&#x2013;<lpage>90</lpage>. <pub-id pub-id-type="doi">10.1016/j.chb.2018.04.019</pub-id>
</citation>
</ref>
<ref id="B149">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sun</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Duanmu</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Ye</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Shi</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2018</year>). <source>Multi-path multi-tier 360-degree video streaming in 5G networks Proceedings of the 9th ACM Multimedia Systems Conference</source>, <publisher-loc>Amsterdam, Netherlands</publisher-loc>. <pub-id pub-id-type="doi">10.1145/3204949.3204978</pub-id>
</citation>
</ref>
<ref id="B107">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Surovaya</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Prayag</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Yung</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Khoo-Lattimore</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Telepresent or not? Virtual reality, service perceptions, emotions and post-consumption behaviors</article-title>. <source>Anatolia</source> <volume>31</volume> (<issue>4</issue>), <fpage>620</fpage>&#x2013;<lpage>635</lpage>. <comment>[Article]</comment>. <pub-id pub-id-type="doi">10.1080/13032917.2020.1808431</pub-id>
</citation>
</ref>
<ref id="B108">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sweller</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>1988</year>). <article-title>Cognitive load during problem solving: effects on learning</article-title>. <source>Cognitive Sci.</source> <volume>12</volume> (<issue>2</issue>), <fpage>257</fpage>&#x2013;<lpage>285</lpage>. <pub-id pub-id-type="doi">10.1016/0364-0213(88)90023-7</pub-id>
</citation>
</ref>
<ref id="B109">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tastan</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Tuker</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Tong</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Using handheld user interface and direct manipulation for architectural modeling in immersive virtual reality: an exploratory study</article-title>. <source>Comput. Appl. Eng. Educ.</source> <volume>30</volume> (<issue>2</issue>), <fpage>415</fpage>&#x2013;<lpage>434</lpage>. <pub-id pub-id-type="doi">10.1002/cae.22463</pub-id>
</citation>
</ref>
<ref id="B110">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Unruh</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Landeck</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Oberd&#xf6;rfer</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Lugrin</surname>
<given-names>J.-L.</given-names>
</name>
<name>
<surname>Latoschik</surname>
<given-names>M. E.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>The influence of avatar embodiment on time perception-towards vr for time-based therapy</article-title>. <source>Front. Virtual Real.</source> <volume>2</volume>, <fpage>658509</fpage>. <pub-id pub-id-type="doi">10.3389/frvir.2021.658509</pub-id>
</citation>
</ref>
<ref id="B111">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Urbano</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Mortimer</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Horan</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Stefan</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Antlej</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Using SIM-TLX to investigate the potential impacts on cognitive load while undertaking tasks in a virtual workplace</article-title>. <source>J. Workplace Learn.</source> <volume>36</volume> (<issue>7</issue>), <fpage>585</fpage>&#x2013;<lpage>604</lpage>. <pub-id pub-id-type="doi">10.1108/JWL-03-2024-0060</pub-id>
</citation>
</ref>
<ref id="B112">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Valtchanov</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Ellard</surname>
<given-names>C. G.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Cognitive and affective responses to natural scenes: effects of low level visual properties on preference, cognitive load and eye-movements</article-title>. <source>J. Environ. Psychol.</source> <volume>43</volume>, <fpage>184</fpage>&#x2013;<lpage>195</lpage>. <pub-id pub-id-type="doi">10.1016/j.jenvp.2015.07.001</pub-id>
</citation>
</ref>
<ref id="B113">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>van der Ham</surname>
<given-names>I. J. M.</given-names>
</name>
<name>
<surname>Klaassen</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>van Schie</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Cuperus</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Elapsed time estimates in virtual reality and the physical world: the role of arousal and emotional valence</article-title>. <source>Comput. Hum. Behav.</source> <volume>94</volume>, <fpage>77</fpage>&#x2013;<lpage>81</lpage>. <pub-id pub-id-type="doi">10.1016/j.chb.2019.01.005</pub-id>
</citation>
</ref>
<ref id="B114">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>van Weelden</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Wiltshire</surname>
<given-names>T. J.</given-names>
</name>
<name>
<surname>Alimardani</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Louwerse</surname>
<given-names>M. M.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Exploring the impact of virtual reality flight simulations on EEG neural patterns and task performance</article-title>. <source>Cognitive Syst. Res.</source> <volume>88</volume>, <fpage>101282</fpage>. <pub-id pub-id-type="doi">10.1016/j.cogsys.2024.101282</pub-id>
</citation>
</ref>
<ref id="B115">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Velev</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Zlateva</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Virtual reality challenges in education and training</article-title>. <source>Int. J. Learn.</source> <volume>3</volume> (<issue>1</issue>), <fpage>33</fpage>&#x2013;<lpage>37</lpage>. <pub-id pub-id-type="doi">10.18178/IJLT.3.1.33-37</pub-id>
</citation>
</ref>
<ref id="B116">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Vieira</surname>
<given-names>V. A.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Stimuli&#x2013;organism-response framework: a meta-analytic review in the store environment</article-title>. <source>J. Bus. Res.</source> <volume>66</volume> (<issue>9</issue>), <fpage>1420</fpage>&#x2013;<lpage>1426</lpage>. <pub-id pub-id-type="doi">10.1016/j.jbusres.2012.05.009</pub-id>
</citation>
</ref>
<ref id="B145">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Vorrink</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Wicaksono</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Fatahi</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Valilai</surname>
<given-names>O.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Analyzing VR/AR Technology Capabilities for Enhancing the Effectiveness of Learning Processes with Focus on Gamification</article-title>. <source>Intell. Syst. Appl.</source> <publisher-loc>Cham</publisher-loc>.</citation>
</ref>
<ref id="B117">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Vorwerg-Gall</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Stamm</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Haink</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Virtual reality exergame in older patients with hypertension: a preliminary study to determine load intensity and blood pressure</article-title>. <source>BMC Geriatr.</source> <volume>23</volume> (<issue>1</issue>), <fpage>527</fpage>. <pub-id pub-id-type="doi">10.1186/s12877-023-04245-x</pub-id>
</citation>
</ref>
<ref id="B118">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wei&#xdf;</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Pfeiffer</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Consumer decisions in virtual commerce: predict good help-timing based on cognitive load</article-title>. <source>J. Neurosci. Psychol. Econ.</source> <volume>17</volume> (<issue>2</issue>), <fpage>119</fpage>&#x2013;<lpage>144</lpage>. <pub-id pub-id-type="doi">10.1037/npe0000191</pub-id>
</citation>
</ref>
<ref id="B119">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Whelan</surname>
<given-names>R. R.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>Neuroimaging of cognitive load in instructional multimedia</article-title>. <source>Educ. Res. Rev.</source> <volume>2</volume> (<issue>1</issue>), <fpage>1</fpage>&#x2013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1016/j.edurev.2006.11.001</pub-id>
</citation>
</ref>
<ref id="B120">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Wienrich</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>D&#xf6;llinger</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Kock</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Schindler</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Traupe</surname>
<given-names>O.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Assessing user experience in virtual reality&#x2013;a comparison of different measurements. Design, User Experience</article-title>,&#x201d; in <conf-name>Theory and Practice: 7th International Conference, DUXU 2018, Held as Part of HCI International 2018</conf-name>, <conf-loc>Las Vegas, NV, USA</conf-loc>, <conf-date>July 15-20, 2018</conf-date>.</citation>
</ref>
<ref id="B121">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Wintersberger</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Klotz</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Riener</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2020</year>). <source>Tell me more: transparency and time-fillers to optimize Chatbots&#x27; Waiting time experience. ACM international conference proceeding series</source>.</citation>
</ref>
<ref id="B122">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Witowska</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Schmidt</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Wittmann</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>What happens while waiting? How self-regulation affects boredom and subjective time during a real waiting situation</article-title>. <source>Acta Psychol.</source> <volume>205</volume>, <fpage>103061</fpage>. <pub-id pub-id-type="doi">10.1016/j.actpsy.2020.103061</pub-id>
</citation>
</ref>
<ref id="B123">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname>
<given-names>D. Y.</given-names>
</name>
<name>
<surname>Lin</surname>
<given-names>J. H. T.</given-names>
</name>
<name>
<surname>Bowman</surname>
<given-names>N. D.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Watching VR advertising together: how 3D animated agents influence audience responses and enjoyment to VR advertising</article-title>. <source>Comput. Hum. Behav.</source> <volume>133</volume>, <fpage>107255</fpage>. <comment>Article 107255</comment>. <pub-id pub-id-type="doi">10.1016/j.chb.2022.107255</pub-id>
</citation>
</ref>
<ref id="B124">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yuan</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>MEinVR: multimodal interaction techniques in immersive exploration</article-title>. <source>Vis. Inf.</source> <volume>7</volume> (<issue>3</issue>), <fpage>37</fpage>&#x2013;<lpage>48</lpage>. <pub-id pub-id-type="doi">10.1016/j.visinf.2023.06.001</pub-id>
</citation>
</ref>
<ref id="B125">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zagermann</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Pfeil</surname>
<given-names>U.</given-names>
</name>
<name>
<surname>Reiterer</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Measuring cognitive load using eye tracking technology in visual computing</article-title>. <source>Proc. sixth workshop beyond time errors Nov. Eval. methods Vis.</source>, <fpage>78</fpage>&#x2013;<lpage>85</lpage>. <pub-id pub-id-type="doi">10.1145/2993901.2993908</pub-id>
</citation>
</ref>
<ref id="B126">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Zakay</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Block</surname>
<given-names>R. A.</given-names>
</name>
</person-group> (<year>1995</year>). &#x201C;<article-title>An attentional-gate model of prospective time estimation</article-title>,&#x201D; in <source>Time and the Dynamic control of Behavior.</source> <publisher-loc>Editors M. Richelle, V. D. Keyser, G. D. Ydeualle, and A. Vandierendonck (Liege: University of Liege Press)</publisher-loc>, <fpage>167</fpage>&#x2013;<lpage>178</lpage>.</citation>
</ref>
<ref id="B127">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zakay</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Block</surname>
<given-names>R. A.</given-names>
</name>
</person-group> (<year>1997</year>). <article-title>Temporal cognition</article-title>. <source>Curr. Dir. Psychol. Sci.</source> <volume>6</volume> (<issue>1</issue>), <fpage>12</fpage>&#x2013;<lpage>16</lpage>. <pub-id pub-id-type="doi">10.1111/1467-8721.ep11512604</pub-id>
</citation>
</ref>
<ref id="B128">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Zakay</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Hornik</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>1991</year>). <article-title>How Much Time Did You Wait in Line?: A Time Perception Perspective</article-title>, <source>Time and Consumer Behaviour, Proceedings of the VIIth John-Labatt Marketing Research Seminar.</source> <publisher-name>Editors J.-C. Chebat and M. V. Venkatesan (Montr&#xe9;al: U.Q.A.M)</publisher-name>, <fpage>1</fpage>&#x2013;<lpage>18</lpage>.</citation>
</ref>
<ref id="B129">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zakay</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Nitzan</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Glicksohn</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>1983</year>). <article-title>The influence of task difficulty and external tempo on subjective time estimation</article-title>. <source>Percept. &#x26; Psychophys.</source> <volume>34</volume>, <fpage>451</fpage>&#x2013;<lpage>456</lpage>. <pub-id pub-id-type="doi">10.3758/BF03203060</pub-id>
</citation>
</ref>
<ref id="B148">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zeynali</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Hajiesmaili</surname>
<given-names>M. H.</given-names>
</name>
<name>
<surname>Sitaraman</surname>
<given-names>R. K.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>BOLA360: Near-optimal view and bitrate adaptation for 360-degree video streaming</article-title>. <source>Proceedings of the 15th ACM Multimedia Systems Conference</source>.</citation>
</ref>
<ref id="B130">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>H.-L.</given-names>
</name>
<name>
<surname>Tso</surname>
<given-names>S. H.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>To switch or not? Effects of spokes-character urgency during the social app loading process and app type on user switching intention</article-title>. <source>Front. Psychol.</source> <volume>14</volume>, <fpage>1110808</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2023.1110808</pub-id>
</citation>
</ref>
<ref id="B131">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhou</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Popescu</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>CloVR: fast-startup low-latency cloud VR</article-title>. <source>IEEE Trans. Vis. Comput. Graph.</source> <volume>30</volume> (<issue>5</issue>), <fpage>2337</fpage>&#x2013;<lpage>2346</lpage>. <pub-id pub-id-type="doi">10.1109/TVCG.2024.3372059</pub-id>
</citation>
</ref>
<ref id="B132">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhu</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Jiang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Jiang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>Q.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>The effectiveness of social elements in virtual reality tourism: a mental imagery perspective</article-title>. <source>J. Hosp. Tour. Manag.</source> <volume>56</volume>, <fpage>135</fpage>&#x2013;<lpage>146</lpage>. <pub-id pub-id-type="doi">10.1016/j.jhtm.2023.05.024</pub-id>
</citation>
</ref>
<ref id="B133">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Zwiezen</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2020</year>). <source>In VR you can&#x27;t escape the loading screen</source>. <comment>Available online at: <ext-link ext-link-type="uri" xlink:href="https://kotaku.com/in-vr-you-cant-escape-the-loading-screen-1844248197">https://kotaku.com/in-vr-you-cant-escape-the-loading-screen-1844248197</ext-link>.</comment>
</citation>
</ref>
</ref-list>
</back>
</article>