<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="EN" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neurosci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neurosci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1662-453X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnins.2026.1654765</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Event-related potential evidence that working memory whether inside or outside a virtual reality environment can reduce the extent of attention capture by irrelevant novel stimuli</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Kamal</surname> <given-names>Farooq</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/3183438/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Choudhury</surname> <given-names>Nusrat</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/3114403/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Doiron</surname> <given-names>Alexandra</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/3234479/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Sadorsky</surname> <given-names>Duncan</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/3378181/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Campbell</surname> <given-names>Kenneth</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/49043/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Morrison</surname> <given-names>Cassandra</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/3114152/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>National Research Council Canada</institution>, <city>Boucherville, QC</city>, <country country="CA">Canada</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Psychology, Carleton University</institution>, <city>Ottawa, ON</city>, <country country="CA">Canada</country></aff>
<aff id="aff3"><label>3</label><institution>School of Psychology, and Brain and Mind Institute, University of Ottawa</institution>, <city>Ottawa, ON</city>, <country country="CA">Canada</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Cassandra Morrison, <email xlink:href="mailto:cassandramorrison@cunet.carleton.ca">cassandramorrison@cunet.carleton.ca</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-18">
<day>18</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>20</volume>
<elocation-id>1654765</elocation-id>
<history>
<date date-type="received">
<day>26</day>
<month>06</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>26</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>30</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 His Majesty the King in Right of Canada.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>His Majesty the King in Right of Canada</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-18">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>If an individual is engaged in a visual task, the onset of a highly novel but unattended auditory stimulus may result in a switch of attention away from the processing of the task-at-hand and to the processing of the more potentially relevant auditory stimuli. This switch is called attention capture. An auditory deviant, representing a change to any feature of a frequently occurring standard stimulus, will automatically elicit an event-related potential, the deviant-related negativity (DRN). If the deviant is highly novel, it may also elicit a later positivity, the P3a, associated with the switching of attention. There is some evidence that carrying out a visual working memory (WM) task may reduce the extent of attention capture. Also, individuals carrying out a task within a virtual reality (VR) environment often report that they may not be aware of irrelevant external stimuli occurring in the external environment that might otherwise elicit attention capture.</p>
</sec>
<sec>
<title>Methods</title>
<p>Nineteen young adults were engaged in three visual tasks: watching a silent video (control), performing a delayed match-to-sample WM task in a VR environment, and performing a somewhat similar WM task presented on a 2D monitor. A multi-feature auditory sequence was presented concurrently but this was irrelevant to the visual task and was to-be-ignored. The sequence consisted of a frequently occurring standard stimulus and six different rarely occurring deviants, created by changing a different feature of the standard.</p>
</sec>
<sec>
<title>Results</title>
<p>All unattended auditory deviants elicited a significant DRN, reflecting robust automatic detection of auditory change. The nature of the visual task had no significant effect on the DRN. Only highly novel deviants (white noise, environmental sounds) elicited a P3a when participants watched the video. This P3a was significantly reduced during both the VR WM and 2D WM tasks.</p>
</sec>
<sec>
<title>Discussion</title>
<p>These findings suggest that early processes associated with detection of acoustic change operate automatically, regardless of the demands of the visual task. On the other hand, the P3a, associated with attention-capture and the switching of attention from the task-at-hand, was reduced in the VR WM task. It was, however, also reduced in the 2D WM task. It is thus not clear whether the VR environment or the fact that participants were engaged in a WM task was responsible for the reduction of the P3a.</p>
</sec>
</abstract>
<kwd-group>
<kwd>auditory deviants</kwd>
<kwd>DRN</kwd>
<kwd>event-related potentials</kwd>
<kwd>MMN</kwd>
<kwd>P3a</kwd>
<kwd>switching of attention</kwd>
<kwd>virtual reality</kwd>
<kwd>working memory</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This project was supported by funding from the National Research Council of Canada&#x2019;s Aging in Place Challenge program (AiP-012). CM was funded by a grant from the Natural Sciences and Engineering Research Council (NSERC) of Canada.</funding-statement>
</funding-group>
<counts>
<fig-count count="6"/>
<table-count count="3"/>
<equation-count count="0"/>
<ref-count count="80"/>
<page-count count="17"/>
<word-count count="14505"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Auditory Cognitive Neuroscience</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="S1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>A primary role of a frontoparietal central executive network is to establish processing priorities and to focus attention on those cognitive tasks deemed to have high priority. Noisy environments may interfere with and disrupt the ability to focus attention on the task-at-hand. There is good evidence that brain networks can adapt or habituate to continuous background noise (<xref ref-type="bibr" rid="B70">Sussman and Winkler, 2001</xref>). On the other hand, it is much more difficult to inhibit the impact of infrequently occurring, brief-lasting transient acoustic stimuli. Even though a task may be deemed to have high priority for processing, certain auditory transients occurring within the environment but outside the current focus of attention will nevertheless be involuntarily processed, causing an interruption of the central executive. This interruption, known as attention capture, or involuntary attention, results in a switch of attention from the processing demands of a current cognitive task and toward the processing of a potentially much more relevant auditory event (<xref ref-type="bibr" rid="B21">James, 1890</xref>). Most unattended input is however incidental. Thus, the switching of attention to these inputs does come at a cost. During attention capture, processing resources are switched to environmental input that is irrelevant to the task-at-hand; therefore, performance on the relevant cognitive task may deteriorate. This process is called distraction and is an obstacle to task execution (<xref ref-type="bibr" rid="B27">Lavie, 2005</xref>; <xref ref-type="bibr" rid="B15">Forster and Lavie, 2008</xref>; <xref ref-type="bibr" rid="B53">Parmentier, 2014</xref>). The present study examines how the extent of attention capture can be reduced.</p>
<p>Attention capture is often associated with auditory rather than visual stimuli (<xref ref-type="bibr" rid="B37">Morrison et al., 2020</xref>; <xref ref-type="bibr" rid="B53">Parmentier, 2014</xref>; <xref ref-type="bibr" rid="B66">Schr&#x00F6;ger, 1996</xref>). The use of auditory stimuli is because we hear auditory stimuli over a 360&#x00B0; space (in front of, beside, and behind our head) whereas vision is restricted to seeing stimuli in front of our head. Several behavioral studies have attempted to determine the extent to which unattended and irrelevant auditory distractors are processed (<xref ref-type="bibr" rid="B13">Escera et al., 2000</xref>; <xref ref-type="bibr" rid="B55">Parmentier et al., 2008</xref>). Behavioral performance on an assigned visual task might thus be poorer when unattended and irrelevant stimuli are presented compared to when they are not presented (<xref ref-type="bibr" rid="B54">Parmentier, 2016</xref>; <xref ref-type="bibr" rid="B5">Berti and Schr&#x00F6;ger, 2003</xref>; <xref ref-type="bibr" rid="B64">SanMiguel et al., 2010</xref>). Note that in these behavioral studies, the extent to which the irrelevant stimulus has been processed can only be implied based on performance. Directly quantifying the extent of processing of a to-be-ignored stimulus is difficult because the participant is not actively responding to it. The study of automaticity in information processing is facilitated by the recording of event-related potentials (ERPs), as they provide a means of determining the extent that to-be-ignored stimuli are processed. ERPs are the minute changes in the electrical activity of the brain that are elicited by a physical stimulus or an internal, psychological event. The ERPs consist of a series of negative- and positive-going components thought to reflect different aspects of information processing.</p>
<p>Auditory attention capture is often studied using a so-called oddball paradigm, consisting of a series of a frequently occurring &#x201C;standard&#x201D; stimulus and at rare (or &#x201C;odd&#x201D;) times, a &#x201C;deviant&#x201D; is created by changing a feature of the standard. The participant is often asked to focus attention on a visual task and to ignore the auditory channel. The auditory stimuli are therefore irrelevant to the visual task. Priority of processing is thus given to the visual task. As such, processing of the unattended auditory stimuli can only occur passively. Both the standard and deviant auditory stimuli will elicit an obligatory negative ERP component, N1, maximum in amplitude over frontocentral areas of the scalp. N1 occurs at about 100 ms after stimulus onset followed by a later positivity, P2 occurring at about 180&#x2013;200 ms. The deviant elicits an additional frontocentral maximum negativity, the mismatch negativity (MMN) occurring between 100 and 200 ms. The MMN is elicited by any change in a feature of the standard stimulus, including its frequency, intensity, duration or location (<xref ref-type="bibr" rid="B42">N&#x00E4;&#x00E4;t&#x00E4;nen, 1990</xref>). In the original N&#x00E4;&#x00E4;t&#x00E4;nen model, a change detection system stores the extracted features of all auditory input in a rapidly fading sensory memory. With each occurrence of the standard, its representation in sensory memory improves. When a deviant is presented, at least one of its features fails to match that of the stored representation of the standard, and change is detected. The output of the change detection system is reflected in the MMN. 
A more recent model claims that the MMN does not necessarily reflect detection of a physical change to the standard stimulus but rather a violation of predictions formed based on the pattern or rules of stimulus presentation (<xref ref-type="bibr" rid="B43">N&#x00E4;&#x00E4;t&#x00E4;nen et al., 2011</xref>; <xref ref-type="bibr" rid="B49">Paavilainen, 2013</xref>; <xref ref-type="bibr" rid="B79">Winkler, 2007</xref>; <xref ref-type="bibr" rid="B80">Winkler et al., 2009</xref>). The oddball sequence is thus a special case in which the occurrence of a deviant violates the expectation for the presentation of the homogeneous, frequently-occurring standard stimulus. The MMN is thought to reflect a pre-attentive, pre-conscious detection of acoustic change. Thus, acoustic change is detected prior to awareness that a change of the auditory stimulus has occurred. Several studies have now indicated that the MMN can be robustly recorded following the presentation of a deviant stimulus regardless of the direction or strength of attention (<xref ref-type="bibr" rid="B40">Muller-Gass et al., 2005</xref>, <xref ref-type="bibr" rid="B41">2006</xref>; <xref ref-type="bibr" rid="B68">Sussman, 2007</xref>, <xref ref-type="bibr" rid="B69">2017</xref>; <xref ref-type="bibr" rid="B20">Hsu et al., 2023</xref>) or task demands (<xref ref-type="bibr" rid="B58">Ritter et al., 1999</xref>).</p>
<p>The MMN is best-observed in a deviant-standard difference wave. When the deviant is created by making only a small change to the standard (e.g., a 1,000 Hz standard and a 1,050 Hz deviant), the onset of both stimuli should elicit a very similar N1. The subtraction process will remove processes that are common to both the standard and the deviant (e.g., the N1), leaving only processing that is unique to the deviant, the MMN. The standard and deviant do not always elicit identical N1s. A deviant that represents an increase in auditory intensity or is highly novel (a stimulus in which a number of features change) will elicit a larger N1 than the standard. This larger N1 will therefore not be completely removed in the subtraction process. Because the novel deviant stimulus also signals change from the standard, it will also elicit an MMN. Unfortunately, the N1 and the MMN may overlap both temporally and spatially (they occur at similar time and are both maximum in amplitude over fronto-central areas of the scalp), resulting in the summation of the two negativities. What is observed in the deviant-standard difference wave is thus a large negativity representing a composite N1 + MMN, rather than a &#x201C;pure&#x201D; MMN. This composite negativity has been labeled as a deviant-related negativity or DRN (<xref ref-type="bibr" rid="B2">Alho et al., 1992</xref>; <xref ref-type="bibr" rid="B38">Muller-Gass and Schr&#x00F6;ger, 2007</xref>; <xref ref-type="bibr" rid="B33">Macdonald and Campbell, 2013</xref>; <xref ref-type="bibr" rid="B71">Tavakoli and Campbell, 2016</xref>). This convention will be used in the present study especially when a deviant is created by possible intensity change.</p>
<p>The rarely occurring deviant stimulus might also elicit a later centro-frontal maximum positivity, the P3a, peaking from 200 to 300 ms. While almost any perceptible change will elicit a DRN/MMN, only highly novel deviants will elicit the P3a when the auditory stimuli are to-be-ignored and are irrelevant to the task-at-hand (<xref ref-type="bibr" rid="B57">Polich, 2007</xref>). These deviants include environmental sounds and white noise whose frequency spectrum and stimulus energy (e.g., intensity) vary widely from the standard stimulus. It is the P3a that is thought to reflect processes associated with the involuntary switching of attention from the task-at-hand and toward the processing of the unattended acoustic change (<xref ref-type="bibr" rid="B12">Escera et al., 1998</xref>; <xref ref-type="bibr" rid="B53">Parmentier, 2014</xref>; <xref ref-type="bibr" rid="B78">Wetzel et al., 2013</xref>). The switching of attention to the auditory channel and the need for additional processing of the highly novel, salient auditory stimulus may then lead to eventual consciousness of it. Whether the P3a reflects the actual switching of attention (<xref ref-type="bibr" rid="B12">Escera et al., 1998</xref>) or processes that may lead to the switching of attention (<xref ref-type="bibr" rid="B78">Wetzel et al., 2013</xref>; <xref ref-type="bibr" rid="B53">Parmentier, 2014</xref>) remains disputed.</p>
<p>Several studies have examined how distraction by auditory stimuli resulting in deterioration in performance on a variety of tasks can be prevented by, for example, varying processing task demands (see reviews by <xref ref-type="bibr" rid="B27">Lavie, 2005</xref>, <xref ref-type="bibr" rid="B28">2010</xref>). A perceptual task that is very difficult will make more demands on the limited processing resources, leaving few for the co-processing of other irrelevant stimulus input. On the other hand, an easy perceptual task will leave many resources available for the co-processing of these irrelevant inputs. In general, when a perceptual task is relatively easy, distractors will have a larger effect on performance than when the task is particularly difficult. The results are nevertheless dependent on several factors including the nature of the task (perceptual versus working memory) and within-modality versus cross-modality interference (see reviews by <xref ref-type="bibr" rid="B27">Lavie, 2005</xref>, <xref ref-type="bibr" rid="B28">2010</xref>).</p>
<p>While there is general agreement that task demands have minimal effects on the MMN, there is some dispute about the extent to which the P3a is modulated by similar task demands. Initial studies of the auditory P3a indicated that it was largely an automatic process and its amplitude did not vary between easy and difficult visual tasks. For example, <xref ref-type="bibr" rid="B39">Muller-Gass et al. (2007)</xref> had participants engage in a continuous visual object tracking task which varied in difficulty. An auditory sequence was presented concurrently but it was irrelevant to the visual object tracking task. The continuous nature of the visual task was designed to prevent participants from sampling the auditory sequence, particularly when the task was very difficult. Thus, participants should not have been able to listen to (eavesdrop on) the auditory sequence while also attending the visual task. An auditory deviant created by increasing the intensity of the standard elicited a large P3a, but importantly this P3a was not affected by task demands. <xref ref-type="bibr" rid="B38">Muller-Gass and Schr&#x00F6;ger (2007)</xref> employed a two-stimulus duration detection task, participants being asked to determine the duration of the auditory stimuli. At rare times, a feature of the auditory stimuli was changed but this was irrelevant to the duration discrimination task. This deviant elicited a P3a, whose amplitude did not vary between easy and difficult duration detection conditions. Similarly, <xref ref-type="bibr" rid="B77">Volosin and Horv&#x00E1;th (2020)</xref> also used an easy and difficult duration discrimination task and observed that task difficulty did not affect the amplitude of the P3a.</p>
<p>Some studies have employed working memory (WM) tasks to investigate whether cognitive load can protect against auditory distraction. It has been well-established that the maintenance of items in WM requires active attention (<xref ref-type="bibr" rid="B3">Baddeley and Hitch, 1974</xref>; <xref ref-type="bibr" rid="B9">Cowan, 1995</xref>). Thus, the high WM load uses the cognitive resources that would otherwise be available for the processing of irrelevant auditory input. Most studies have used an n-back task to assess WM. In the <italic>n</italic>-back task, the participant is asked to determine whether the current stimulus matches a stimulus presented <italic>n</italic> trials earlier in the sequence. In the <xref ref-type="bibr" rid="B38">Muller-Gass and Schr&#x00F6;ger (2007)</xref> study, a 1-back memory task condition was also run. Participants were asked whether the present short or long duration auditory stimulus was the same duration as the one that had preceded it. The pitch of the frequently occurring standard was at times changed to form a deviant, but the pitch change was irrelevant to the 1-back memory task. The distractor deviant resulted in poorer memory performance. These performance results were similar to those reported by <xref ref-type="bibr" rid="B27">Lavie (2005)</xref>. In addition, a larger P3a was elicited when the participant had to decide whether the duration of the current auditory deviant was also presented in the previous trial (1-back condition) compared to when the participant had to decide about its duration (0-back condition). Thus, the <italic>n</italic>-back task seemed to <italic>enhance</italic> rather than protect against distraction. The effects of a distractor depend on several factors. In the <xref ref-type="bibr" rid="B39">Muller-Gass et al. (2007)</xref> study, the auditory distractor occurred within an auditory <italic>n</italic>-back task. Other studies have used a visual <italic>n</italic>-back. 
In these studies, auditory stimuli occur prior to visual stimuli, but they are irrelevant to the task (<xref ref-type="bibr" rid="B32">Lv et al., 2010</xref>; <xref ref-type="bibr" rid="B63">SanMiguel et al., 2008</xref>). These studies have reported that the P3a elicited by irrelevant deviants was reduced in amplitude when the <italic>n</italic>-back task was more demanding (<xref ref-type="bibr" rid="B32">Lv et al., 2010</xref>; <xref ref-type="bibr" rid="B63">SanMiguel et al., 2008</xref>). On the other hand, <xref ref-type="bibr" rid="B34">Mahajan et al. (2020)</xref> did not find that <italic>n</italic>-back task difficulty had a significant effect on the amplitude of the auditory P3a. In the <italic>n</italic>-back studies, the presentation of an irrelevant auditory stimulus prior to the relevant visual stimuli is problematic. While the auditory stimuli were irrelevant to the visual <italic>n</italic>-back, they could still have been used as a warning signal or as a cue to predict the subsequent occurrence of the visual target (<xref ref-type="bibr" rid="B4">Baragona et al., 2025</xref>; <xref ref-type="bibr" rid="B53">Parmentier, 2014</xref>). Thus, attending to the auditory sequence could have been used to improve performance on the visual WM task. As such, differences in the amplitude of the P3a may have been a result of passive compared to active processing of the deviant.</p>
<p>The manipulation of task demands is based on the assumption that the strong attentional focus required by a difficult task does not allow participants to also attend to and process the irrelevant auditory input. By contrast, during an easy task, additional attentional resources should be available to process the irrelevant auditory input. <xref ref-type="bibr" rid="B41">Muller-Gass et al. (2006)</xref> have however provided behavioral evidence that even during very difficult visual tasks, participants might still be able to actively attend to auditory input. They employed easy and difficult visual discrimination tasks while auditory stimuli were presented concurrently. In one condition, participants were required to overtly divide their attention between the auditory and visual channels and asked to detect rarely occurring targets in both modalities. Even when the visual task was quite difficult, when participants were asked to divide their attention between the two channels, they were able to successfully detect the rarely occurring visual <italic>and</italic> auditory targets. Thus, even when an auditory sequence is to-be-ignored, it may nevertheless be possible to eavesdrop on the auditory channel and still maintain a high level of performance on the visual task, even if the visual task is difficult and makes high demands on attentional resources.</p>
<p>Recent technological advances such as virtual reality (VR) offer a newer method to manipulate task engagement. The realistic 3D VR environment has been described as being highly &#x201C;immersive&#x201D; and thus extremely demanding of cognitive resources (<xref ref-type="bibr" rid="B1">Al Boustani et al., 2022</xref>). Individuals often experience an intense sense of &#x201C;presence&#x201D; such that the virtual environment becomes the dominant reality. These individuals may even be unaware of their real, external world (<xref ref-type="bibr" rid="B62">Sanchez-Vives and Slater, 2005</xref>). Engaging in a VR task may therefore be able to prevent the switching of attention to the irrelevant auditory input occurring in the external environment. The present study examines whether the amplitude of a P3a elicited by the occurrence of an unattended rare, novel deviant stimulus can be reduced by carrying out a visual WM task within a VR environment. Perhaps the most convincing evidence that VR tasks can be extremely demanding of cognitive resources comes from studies of how an extremely salient, but task-irrelevant stimulus, pain, can be inhibited (see <xref ref-type="bibr" rid="B18">Hadjiat and Marchand, 2022</xref> for a review). Engagement in a visual VR task has been shown to be associated with a reduction in participants&#x2019; rating of both acute and chronic pain (<xref ref-type="bibr" rid="B76">Viderman et al., 2023</xref>; <xref ref-type="bibr" rid="B73">Teh et al., 2024</xref>; <xref ref-type="bibr" rid="B46">Nagamine, 2025</xref>). Such ratings are however very subjective and prone to bias. ERP responses to unattended painful stimuli have been used to provide an objective measure of the extent to which these stimuli are processed. <xref ref-type="bibr" rid="B29">Lier et al. (2020)</xref> asked participants to watch video scenes within a VR environment. Painful electric shocks were presented occasionally but these were irrelevant to the VR task. 
In a control condition, participants saw a static image and again, the irrelevant electric shocks were presented. The pain stimulus elicited a late positivity occurring between 220 and 250 ms. This positivity is consistent with the P3a. The P3a following presentation of the electric shock was reduced in amplitude when the participants were actively engaged in watching the video compared to when they saw the static image within the VR environment.</p>
<p>In other studies, irrelevant auditory rather than pain stimuli have been presented. The results however vary; some studies show an effect of engaging in a VR task such that the amplitude of the ERPs to the irrelevant auditory stimuli are reduced, while others do not show the effect (<xref ref-type="bibr" rid="B1">Al Boustani et al., 2022</xref>; <xref ref-type="bibr" rid="B6">Burns and Fairclough, 2015</xref>; <xref ref-type="bibr" rid="B7">Chen et al., 2014</xref>; <xref ref-type="bibr" rid="B16">Grassini et al., 2021</xref>; <xref ref-type="bibr" rid="B25">Kober and Neuper, 2012</xref>; <xref ref-type="bibr" rid="B65">Sarasso et al., 2024</xref>; <xref ref-type="bibr" rid="B74">Terkildsen and Makransky, 2019</xref>). VR task demands and auditory stimulus parameters vary widely across studies, making generalization difficult. Moreover, in the oddball studies that have been run, the auditory deviant stimulus may not have been sufficiently salient to elicit a P3a, and induce a switch of attention from the VR task.</p>
<p>The present study employs a WM task adapted to the VR environment. A VR version of the <italic>n</italic>-back task using its highly realistic 3D capabilities has yet to be implemented. <xref ref-type="bibr" rid="B8">Climent et al. (2021)</xref> and <xref ref-type="bibr" rid="B24">Klotzsche et al. (2023)</xref> have developed a VR version of a delayed match-to-sample task. In this type of WM task, on a single trial, participants may see a different number of items and after a delay are presented with a probe. Participants are asked whether the probe matches what has previously been presented. These tasks were, however, designed to closely mimic procedures used in traditional 2D WM tasks. <xref ref-type="bibr" rid="B22">Kamal et al. (2025)</xref> have described the development of a dynamic realistic 3D version of a delayed match-to-sample task. They demonstrated that it had essential attributes common to 2D versions of a WM task. Increasing the number of items to be remembered (i.e., WM load) resulted in poorer performance. WM capacity was also poorer in older than younger adults. It is this task that will be used in the present study.</p>
<p>An issue with the use of VR tasks is that some individuals are often unable to tolerate lengthy testing periods. Many auditory studies employ an oddball paradigm that usually only includes one or two rarely occurring deviants within the sequence. Most of these studies presented only a single deviant stimulus that was not sufficiently novel to elicit a P3a. To determine the effects of different deviants on the P3a, the usual oddball paradigm would need to be repeated several times, one time for each deviant, leading to a lengthy testing period. This long testing time may not be possible within VR environments. However, this problem may be overcome using a time-efficient, multi-feature paradigm (<xref ref-type="bibr" rid="B45">N&#x00E4;&#x00E4;t&#x00E4;nen et al., 2004</xref>). This paradigm allows for the presentation of several deviants in a single run, each representing a change of a different feature of the standard. The sequence consists of an alternating pattern of standards and deviants. While the overall probability of standard and deviant occurrence is 0.5 for each, the probability of any specific type of deviant is lower. If five deviants are presented, each different deviant occurs relatively rarely, on 10% of trials. <xref ref-type="bibr" rid="B71">Tavakoli and Campbell (2016)</xref> developed a multi-feature paradigm designed specifically for the study of the P3a, in which six different deviants were presented in a single sequence. All deviants elicited a DRN. Only the white noise and novel environmental sound deviants elicited a P3a, while other deviants representing frequency, intensity (both increases and decreases), and duration changes did not.</p>
<p>A multi-feature paradigm was therefore used to efficiently record ERPs to many different types of auditory deviants. In most DRN/MMN studies, participants are asked to watch a sub-titled silent video while ignoring the auditory sequence containing the standards and deviants. The use of this visual task is based on the rationale that the DRN/MMN is largely unaffected by task demands and what the participant &#x201C;is doing.&#x201D; However, <xref ref-type="bibr" rid="B41">Muller-Gass et al. (2006)</xref> have provided strong evidence that participants can, in fact, also co-monitor the auditory channel while carrying out this visual task. Two other visual tasks were run. In a second condition, participants were engaged in a delayed match-to-sample WM task within a VR environment. The VR environment may be so demanding of cognitive resources that few resources will be available to allow for the co-monitoring and processing of the auditory channel. A third condition was also run. Participants were also engaged in a somewhat similar delayed match-to-sample WM task outside of the VR environment. This task was presented on a 2D computer monitor (thus a 2D WM task). If the VR environment is highly demanding of cognitive resources, then attention capture and the P3a should be reduced compared to the control condition. It is however possible that simply engaging in a WM task itself will require many processing resources. As such, the P3a may be reduced in both the VR and 2D versions of a WM task.</p>
</sec>
<sec id="S2" sec-type="materials|methods">
<label>2</label>
<title>Materials and methods</title>
<sec id="S2.SS1">
<label>2.1</label>
<title>Participants</title>
<p>Twenty young adults volunteered to participate in this study. All reported normal hearing. One participant was excluded from analysis because of noisy EEG data (see section 2.4). A total of 19 young adults aged 18&#x2013;30 years were therefore analyzed (7 males). All participants were right-handed, with no history of neurological or psychiatric conditions. None were taking medication that could influence central nervous system functioning. This study was approved by the National Research Council of Canada and Carleton University (clearance number 121501) Ethics boards following the guidelines of the Canadian Tri-Council on ethical conduct involving humans. Participants provided informed written consent before starting the study and an honorarium was provided.</p>
</sec>
<sec id="S2.SS2">
<label>2.2</label>
<title>Stimuli and procedure</title>
<p>Participants were asked to focus attention on three different visual tasks presented in different conditions. The order of conditions was randomized. Auditory stimuli were presented concurrently but were irrelevant to the visual tasks and were to-be-ignored. In all conditions, participants were seated in a non-swivel chair in order to reduce muscle artifact. Participants were asked to avoid blinks and overt eye and body movements as much as possible.</p>
<sec id="S2.SS2.SSS1">
<label>2.2.1</label>
<title>Passive video control task</title>
<p>In condition one, participants watched a silent English sub-titled Planet Earth video. It was presented on an 8.8 inch (approximately 22 cm) tablet at a distance of about 1 m from the participant. Watching a video is a commonly used task in DRN/MMN and P3a research and therefore served as a control condition. This condition was repeated twice, with each block lasting about 10 min.</p>
</sec>
<sec id="S2.SS2.SSS2">
<label>2.2.2</label>
<title>Virtual reality working memory task</title>
<p>In the second condition, participants were engaged in a delayed match-to-sample WM task within a realistic 3D VR environment. During the VR WM task, participants wore a head-mounted Meta Quest 2 VR headset, having a 1,832 &#x00D7; 1,920 pixel Fast-Switch LCD display, a 90 Hz refresh rate, and an 89&#x00B0; field of view. The VR system employed wireless hand-held controllers, each featuring a grip handle and a tracking ring at the top for positional and motion tracking. The controllers were equipped with a primary trigger and a grip button, enabling participants to actively interact with and grasp virtual objects.</p>
<p>The VR version of the WM task was adapted from bWell, a multisensorial platform developed by the National Research Council of Canada (<xref ref-type="bibr" rid="B67">Shaigetz et al., 2021</xref>; <xref ref-type="bibr" rid="B22">Kamal et al., 2025</xref>). The task is similar to a traditional delayed match-to-sample WM task and thus includes encoding, maintenance, recognition, and response phases. The procedure for the VR version of the WM task is illustrated in the left portion of <xref ref-type="fig" rid="F1">Figure 1</xref>. Participants were placed in a realistic 3D theatre scene. In the encoding phase, participants saw different objects that appeared on the theatre&#x2019;s screen for 5 s. Participants were instructed to remember these objects. The objects were subsequently concealed behind a curtain for 15 s. During this maintenance phase of the task, participants needed to retain the encoded objects in memory. All objects could be converted to a verbal code (or &#x201C;named&#x201D;) (e.g., heart, triangle) permitting the use of articulatory rehearsal to maintain the encoded objects in memory. In the subsequent recognition phase, objects that matched and others that did not match the previously presented objects (i.e., foils) fell to the stage. Participants had to identify the matching objects and use the controller to pick them up and then place the objects on a pedestal in the correct sequence. The participants were then asked to push a green button that appeared on the stage thus completing the trial. The time to complete the recognition and response phases was very long. These phases lasted until the response was executed. A 35 s time limit was imposed. Elaborate and precise hand and arm movements were required to execute the response. The next trial began 8 s after the execution of the response. The complex and demanding response sequence further assured attention was focussed on the WM task, preventing sampling of the auditory sequence. 
The same number of objects appeared across five consecutive trials. Successful performance led to the addition of an object (step-up), while an error led to the removal of an object (step-down).</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption><p>Delayed match-to-sample working memory (WM) tasks. In the left portion, the procedures for the virtual reality (VR) WM task are illustrated. In this example, the participant was presented with 3 target objects in the encoding phase. In the subsequent recognition phase the same 3 targets and additional foils were presented. The participant needed to select the 3 targets that were originally presented and place them on a pedestal. In the right portion, the procedures for the 2D WM task are illustrated. The participant could be asked to remember 1, 2, or 4 objects. The objects were presented sequentially. In this example, 2 objects were presented and repeated twice within the 4 object presentations. A probe was then presented and the participant needed to decide whether this probe was among the objects that had been previously presented.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1654765-g001.tif">
<alt-text content-type="machine-generated">Diagram compares two working memory tasks: a Virtual Reality WM sequence with objects displayed and concealed on a stage, followed by selection and placement, and a 2D WM task showing sequential memory items and a probe with timing intervals indicated.</alt-text>
</graphic>
</fig>
<p>The VR WM task was repeated twice. Each block lasted about 10 min. Prior to the start of the initial VR WM block, participants were given a practice session lasting about 5 min to ensure they understood the instructions and response demands.</p>
</sec>
<sec id="S2.SS2.SSS3">
<label>2.2.3</label>
<title>2D working memory task</title>
<p>In a third condition, participants were engaged in a similar delayed match-to-sample WM task presented on a 14 inch (approximately 35 cm) 2D computer monitor having a 90 Hz refresh rate, located about 1 m in front of the participant. The 2D WM task is illustrated in the right portion of <xref ref-type="fig" rid="F1">Figure 1</xref>. In this task, participants were presented with a sequence of different objects followed by a probe. The objects that were used in the VR WM task were also used in the 2D WM task. The participant&#x2019;s task was to determine whether the probe was a member of the set that had just been presented. Each trial began with a 1,000 ms duration fixation point (&#x201C; + &#x201D;) presented in the center of the monitor. The memory set was then presented. This memory set consisted of 1, 2, or 4 different objects. In each trial, 4 objects were always presented. When the memory load was 1, this object was repeated 4 times. When the memory load was 2, the first object was repeated 2 times followed by the second object, repeated 2 times. Four different objects were presented sequentially when the memory load was 4. The objects were taken from those used in the WM task within the VR environment and all objects could therefore be converted into a verbal code (i.e., were nameable). Stimulus duration was relatively brief, each image lasting 250 ms, followed by an interstimulus interval (offset-to-onset) of 300 ms. After the final image of the set, a question mark appeared for 250 ms. This question mark served to warn the participant that the probe was about to be presented. The duration of the probe was also 250 ms, but participants could respond up to 1,000 ms after presentation. Participants were asked to push one mouse button if they decided the probe had been a member of the set and a second mouse button if they decided it had not been previously presented. A total of 104 trials were presented. 
Because there were only a limited number of objects used in the VR, they were repeated at random over the 104 trials. Half of the probe objects were part of the memory set (positive probe), while the remaining half were not (negative probe). An equal number of positive probes were presented at random in either the first, second, third, or fourth position of the memory set. The memory set and probe objects subtended a horizontal visual angle of 15&#x00B0; and a vertical angle of 10&#x00B0;.</p>
<p>A total of 104 trials were presented, also lasting about 10 min. The 2D WM task was repeated twice. Each block also lasted about 10 min. Again, prior to the start of the initial block, participants were given a practice session lasting about 5 min.</p>
</sec>
</sec>
<sec id="S2.SS3">
<label>2.3</label>
<title>Multi-feature auditory paradigm</title>
<p>A multi-feature auditory sequence was presented concurrently with each of the visual tasks. The auditory stimuli were created using Audacity, version 2.1.0 software. The auditory stimuli were however irrelevant to the visual tasks and were to-be-ignored. Auditory stimuli were synthesized using a SoundBlaster 16-bit waveform generator and presented binaurally through calibrated Sony MDR V6 over-the-ear headphones. These headphones have a relatively flat frequency response from 500 to 4,000 Hz. Standard stimuli (80 dB SPL, 1,000 Hz pure tone, 200 ms duration with a rise-and-fall time of 5 ms) alternated with 6 different deviants. The standards and deviants thus occurred on 50% of trials. The six deviants were created by changing a feature (or features) of the standard: a 10 dB intensity increase (increment), a 20 dB intensity decrease (decrement), a frequency change to 1,100 Hz, a duration decrease to 100 ms, a white noise burst, and environmental sounds. A different environmental sound was presented on each trial. The features of the environmental sounds are described in detail by <xref ref-type="bibr" rid="B14">Fabiani et al. (1996)</xref>. A number of different sounds were presented including animal and human sounds, in addition to sounds commonly heard (telephone ringing, water dripping, car honks, etc.), musical instruments, and mechanical sounds. Although the standard:deviant probability of occurrence was 0.50:0.50, the probability of occurrence of a specific deviant was 0.083. In an array of 6 standards and alternating 6 deviants, each deviant occurred one time. The order of occurrence of a specific deviant was randomized. In each subsequent array, the order of occurrence of a specific deviant was again randomized. The same deviant was never presented consecutively (the first deviant in a new array could not be the same as the last deviant in the previous array). 
The first ten sounds in the sequence consisted of only standards to establish a memory trace for the standard stimulus. Stimuli were presented rapidly with a stimulus onset asynchrony of 600 ms. Each sequence lasted slightly more than 9 min with 472 standards (including 10 standards presented before the alternating sequence) and 77 of each of the 6 deviants being presented. The multi-feature sequence was presented twice for each visual task condition. A brief rest period was provided between blocks. Total testing time was about 90 min.</p>
</sec>
<sec id="S2.SS4">
<label>2.4</label>
<title>EEG recording</title>
<p>EEG activity was recorded using 29 active silver-silver chloride electrodes, attached to an electrode cap (Brain Products GmbH, Gilching, Germany) and placed over frontal, central, parietal, temporal, and occipital sites according to the international 10&#x2013;10 system. Two additional electrodes were placed on the left and right mastoids (FT9, FT10), where the DRN/MMN inverts in polarity. An additional ocular (EOG) electrode was also placed on the infraorbital ridge of the left eye to record vertical eye movements and blinks. A reference electrode was placed on the tip of the nose. The EEG was sampled at a rate of 500 Hz (i.e., every 2 ms). The low-pass filter was set at 250 Hz. The time constant was 10 s (a high-pass filter of about 0.016 Hz). Electrode impedance was below 20 k&#x03A9;. Frontal (F3, Fz, F4) and central (C3, Cz, C4) sites were regions of interest (ROIs), where the DRN and P3a are maximum in amplitude. Impedances at these ROIs were below 10 k&#x03A9;.</p>
<p>The physiological data were subsequently analyzed using Brain Products&#x2019; Analyzer2 software. The EEG and EOG data were digitally filtered using a high pass filter of 0.5 Hz and using a low-pass filter set at 20 Hz (24 dB/octave roll-off). The EEG was visually inspected for channels containing high levels of noise. These channels were replaced by interpolating the data of the surrounding electrode sites (<xref ref-type="bibr" rid="B56">Perrin et al., 1989</xref>). Interpolation was not used for the frontal and central ROIs. Only 1 participant had more than 4 channels containing excessive noise and was thus excluded from further analyses. Independent Component Analysis (<xref ref-type="bibr" rid="B35">Makeig et al., 1996</xref>) was then used to identify eye movement and blink artifacts that were statistically independent of the EEG activity. To correct for these artifacts occurring within the EEG signals, vertical and horizontal EOG activity needed to be computed. A vertical EOG channel was computed by subtracting activity recorded at FP1 from the infra-orbital ridge. A horizontal EOG channel was computed by subtracting FT9 activity from that of FT10. The continuous EEG was then reconstructed into 700 ms epochs starting 100 ms before stimulus onset. The average of EEG within the 100 ms pre-stimulus period served as a zero-voltage baseline for all standard and deviant stimuli. Each single trial epoch was then baseline-corrected to remove slow voltage &#x201C;drifts.&#x201D; For each of the standard and deviant single trials, the mean amplitude of all data points within the baseline period was subtracted from that of all subsequent data points in the post-stimulus period. Any single epoch containing activity exceeding &#x00B1; 100 &#x03BC;V was then rejected from further analysis. Rejections based on this criterion were relatively rare because the most common type of artifact, eye blinks and movements, had already been corrected. 
Other sources of artifact including arm and hand movements occurring within the VR task were largely attenuated by the EEG bandpass filter. <xref ref-type="table" rid="T1">Table 1</xref> presents the total number of trials that were accepted for the different conditions and stimuli. Fewer than 1.5% of trials were rejected on the basis of artifact. Following the presentation of both standard and deviant stimuli, slightly more trials were rejected during the VR condition (1.3% for both standards and deviants) than during the 2D WM condition (0.5% of standards and 0.6% of deviants). While the differences between the conditions were statistically significant (<italic>p</italic> &#x003C; 0.05 in both cases), such small differences had little practical significance. The single epochs were then sorted according to electrode site, visual task condition, and stimulus type (standard, six deviants), then averaged.</p>
<table-wrap position="float" id="T1">
<label>TABLE 1</label>
<caption><p>Mean number and SD of accepted trials for the standard and deviant stimuli in the 3 visual task conditions.</p></caption>
<table cellspacing="5" cellpadding="5" frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Condition</th>
<th valign="top" align="center">Stimulus</th>
<th valign="top" align="center">Total trials</th>
<th valign="top" align="center">Accepted mean</th>
<th valign="top" align="center">Accepted SD</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" rowspan="4"><italic>Movie</italic></td>
<td valign="top" align="center">Standard</td>
<td valign="top" align="center">924</td>
<td valign="top" align="center">919.4</td>
<td valign="top" align="center">10.4</td>
</tr>
<tr>
<td valign="top" align="center">Environmental</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">151.6</td>
<td valign="top" align="center">3.3</td>
</tr>
<tr>
<td valign="top" align="center">White noise</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">152.6</td>
<td valign="top" align="center">2.3</td>
</tr>
<tr>
<td valign="top" align="center">Decrement</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">152.7</td>
<td valign="top" align="center">2.0</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="7"><italic>VR WM</italic></td>
<td valign="top" align="center">Increment</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">152.1</td>
<td valign="top" align="center">2.7</td>
</tr>
<tr>
<td valign="top" align="center">Duration</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">152.1</td>
<td valign="top" align="center">1.9</td>
</tr>
<tr>
<td valign="top" align="center">Frequency</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">152.3</td>
<td valign="top" align="center">2.2</td>
</tr>
<tr>
<td valign="top" align="center">Standard</td>
<td valign="top" align="center">924</td>
<td valign="top" align="center">913.1</td>
<td valign="top" align="center">16.5</td>
</tr>
<tr>
<td valign="top" align="center">Environmental</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">152.2</td>
<td valign="top" align="center">2.4</td>
</tr>
<tr>
<td valign="top" align="center">White noise</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">152.2</td>
<td valign="top" align="center">2.2</td>
</tr>
<tr>
<td valign="top" align="center">Decrement</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">152.2</td>
<td valign="top" align="center">2.1</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="10"><italic>2D WM</italic></td>
<td valign="top" align="center">Increment</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">151.2</td>
<td valign="top" align="center">2.4</td>
</tr>
<tr>
<td valign="top" align="center">Duration</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">151.9</td>
<td valign="top" align="center">2.8</td>
</tr>
<tr>
<td valign="top" align="center">Frequency</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">152.2</td>
<td valign="top" align="center">2.2</td>
</tr>
<tr>
<td valign="top" align="center">Standard</td>
<td valign="top" align="center">924</td>
<td valign="top" align="center">919.1</td>
<td valign="top" align="center">4.3</td>
</tr>
<tr>
<td valign="top" align="center">Environmental</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">153.3</td>
<td valign="top" align="center">1.0</td>
</tr>
<tr>
<td valign="top" align="center">White noise</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">153.2</td>
<td valign="top" align="center">0.9</td>
</tr>
<tr>
<td valign="top" align="center">Decrement</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">153.2</td>
<td valign="top" align="center">0.9</td>
</tr>
<tr>
<td valign="top" align="center">Increment</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">152.9</td>
<td valign="top" align="center">1.1</td>
</tr>
<tr>
<td valign="top" align="center">Duration</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">153.3</td>
<td valign="top" align="center">0.9</td>
</tr>
<tr>
<td valign="top" align="center">Frequency</td>
<td valign="top" align="center">154</td>
<td valign="top" align="center">152.9</td>
<td valign="top" align="center">1.1</td>
</tr>
</tbody>
</table></table-wrap>
<sec id="S2.SS4.SSS1">
<label>2.4.1</label>
<title>ERP quantification and analysis</title>
<p>Both the standard and the deviant elicited obligatory N1 and P2 ERP deflections. The deviants elicited a series of additional deflections, including the DRN, and for some deviants, the P3a. The DRN and P3a are best observed in a difference wave computed by subtracting point-by-point the average response of the standard from that of the deviant. The older, traditional method for scoring ERP deflections identified a maximum peak within a latency range. There are however both theoretical and methodological problems with this method. It assumes that a cognitive process, for example, change detection, occurs at a specific point in time. In reality, cognitive processes and decisions occur over a period of time. The peak detection method encounters issues when low amplitude responses are embedded in high frequency noise. Residual noise might thus be measured as the maximum peak. <xref ref-type="bibr" rid="B30">Luck (2014)</xref> recommends a different measurement technique. All data points within a specific interval of time are averaged. This procedure has the advantage that it better approximates the duration of an actual cognitive process. The mean amplitude measure should also cancel the positive- and negative-going residual noise. The DRN and P3a were therefore quantified for each individual using the mean amplitude of all data points within &#x00B1; 25 ms of the peak identified in the grand average difference wave for each deviant and each task condition. There are problems with the mean amplitude measure. It may tend to smear or underestimate the true amplitude. <xref ref-type="table" rid="T2">Table 2</xref> presents the 50 ms time intervals used for the scoring of the DRN and P3a in the control (watch movie) condition. Their amplitudes were measured with respect to the zero-voltage pre-stimulus baseline.</p>
<table-wrap position="float" id="T2">
<label>TABLE 2</label>
<caption><p>50-ms time windows used for the scoring of the DRN and P3a in the control (watch movie) condition.</p></caption>
<table cellspacing="5" cellpadding="5" frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Deviant</th>
<th valign="top" align="center">DRN</th>
<th valign="top" align="center">P3a</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Environmental</td>
<td valign="top" align="center">095&#x2013;145</td>
<td valign="top" align="center">185&#x2013;235</td>
</tr>
<tr>
<td valign="top" align="left">White noise</td>
<td valign="top" align="center">085&#x2013;135</td>
<td valign="top" align="center">180&#x2013;230</td>
</tr>
<tr>
<td valign="top" align="left">Decrement</td>
<td valign="top" align="center">080&#x2013;130</td>
<td valign="top" align="center">240&#x2013;290</td>
</tr>
<tr>
<td valign="top" align="left">Increment</td>
<td valign="top" align="center">075&#x2013;125</td>
<td valign="top" align="center">205&#x2013;255</td>
</tr>
<tr>
<td valign="top" align="left">Duration</td>
<td valign="top" align="center">075&#x2013;125</td>
<td valign="top" align="center">255&#x2013;305</td>
</tr>
<tr>
<td valign="top" align="left">Frequency</td>
<td valign="top" align="center">075&#x2013;125</td>
<td valign="top" align="center">210&#x2013;260</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn><p>The same approximate time windows were also used for the VR WM and 2D WM conditions.</p></fn>
</table-wrap-foot>
</table-wrap>
<p>Significant differences among deviant ERPs would usually be determined with an analysis of variance (ANOVA) statistical procedure. It was expected that only certain deviants would elicit a P3a but for other deviants, a P3a would be absent (<xref ref-type="bibr" rid="B71">Tavakoli and Campbell, 2016</xref>; <xref ref-type="bibr" rid="B37">Morrison et al., 2020</xref>). The usual ANOVA procedure could be used to determine if the P3a amplitude was significantly <italic>smaller</italic> or <italic>larger</italic> among deviants. The ANOVA cannot be used to determine if the P3a was absent (i.e., not elicited). To confirm if a DRN or P3a had been elicited by a deviant, confidence intervals were initially computed to determine if both the lower and upper limits were significantly less (or negative-going, in the case of the DRN) or greater (or positive-going, in the case of the P3a) than the zero-amplitude pre-stimulus zero voltage baseline. The procedure was run at the Fz electrode site for the DRN and at Cz for the P3a, where each tends to be at maximum amplitude. Because a directionality was predicted (negativity in the case of the DRN and positivity in the case of the P3a), one-tailed tests of significance (<italic>p</italic> &#x003C; 0.05) were applied to the confidence intervals. Such use of a liberal statistical procedure does increase the risk of type I error, claiming a response was elicited when in fact, it was absent. The use of liberal procedures would allow possible small amplitude responses to pass to the additional ANOVA procedures. A more conservative procedure would remove the possibility of additional analyses. 
Nevertheless, to restrict the likelihood of chance findings, the negativity had to conform to the usual latency (100&#x2013;250 ms) and scalp distribution (fronto-central maximum, inversion in polarity at the mastoids) for the DRN, while the positivity had to conform to the usual latency (200&#x2013;300 ms) and scalp distribution (centro-frontal maximum) of the P3a.</p>
<p>Confidence interval testing for the DRN (<xref ref-type="table" rid="T3">Table 3</xref>) revealed that its amplitude was significantly different from zero-voltage baseline for all 6 deviants. Separate ANOVAs were then conducted at frontal (F3, Fz, F4) and central (C3, Cz, C4) ROIs and included the DRN data for all six deviants. A 3-way ANOVA with repeated measures on visual Task (video, VR WM, and 2D WM), Deviant (intensity increment, intensity decrement, frequency and duration change, white noise, and novel environmental sounds) and electrode site was computed for the DRN. Confidence interval testing for the P3a (<xref ref-type="table" rid="T3">Table 3</xref>) revealed that its amplitude differed significantly from the zero-voltage baseline for only the white noise and novel environmental sounds. The same 3-way repeated measures ANOVA was run but the deviant factor was restricted to the white noise and environmental sound deviants. The ANOVA was again run separately for the frontal and central ROIs. A 2-way ANOVA with repeated measures was also run for the N1 and P2 deflections following the presentation of the standard.</p>
<table-wrap position="float" id="T3">
<label>TABLE 3</label>
<caption><p>Mean DRN (measured at Fz) and P3a (measured at Cz) amplitudes (SDs) and 95% lower and upper confidence intervals for the control (passive video watching) condition.</p></caption>
<table cellspacing="5" cellpadding="5" frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Deviant</th>
<th valign="top" align="center">Amplitude (SD)</th>
<th valign="top" align="center">95% CI</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" colspan="3"><bold>DRN</bold></td>
</tr>
<tr>
<td valign="top" align="left">Environmental</td>
<td valign="top" align="center">&#x2212;1.65 (1.52)</td>
<td valign="top" align="center">(&#x2212;2.39, &#x2212;0.92)</td>
</tr>
<tr>
<td valign="top" align="left">White noise</td>
<td valign="top" align="center">&#x2212;1.15 (1.55)</td>
<td valign="top" align="center">(&#x2212;1.90, &#x2212;0.41)</td>
</tr>
<tr>
<td valign="top" align="left">Decrement</td>
<td valign="top" align="center">&#x2212;2.03 (1.32)</td>
<td valign="top" align="center">(&#x2212;2.67, &#x2212;1.40)</td>
</tr>
<tr>
<td valign="top" align="left">Increment</td>
<td valign="top" align="center">&#x2212;1.42 (1.41)</td>
<td valign="top" align="center">(&#x2212;2.10, &#x2212;0.74)</td>
</tr>
<tr>
<td valign="top" align="left">Duration</td>
<td valign="top" align="center">&#x2212;1.57 (1.03)</td>
<td valign="top" align="center">(&#x2212;2.06, &#x2212;1.07)</td>
</tr>
<tr>
<td valign="top" align="left">Frequency</td>
<td valign="top" align="center">&#x2212;2.01 (1.13)</td>
<td valign="top" align="center">(&#x2212;2.55, &#x2212;1.46)</td>
</tr>
<tr>
<td valign="top" align="left" colspan="3"><bold>P3a</bold></td>
</tr>
<tr>
<td valign="top" align="left">Environmental</td>
<td valign="top" align="center">+6.11 (3.38)</td>
<td valign="top" align="center">(+4.49, +7.74)</td>
</tr>
<tr>
<td valign="top" align="left">White noise</td>
<td valign="top" align="center">+6.56 (2.85)</td>
<td valign="top" align="center">(+5.96, +8.71)</td>
</tr>
<tr>
<td valign="top" align="left">Decrement</td>
<td valign="top" align="center">+1.02 (1.31)</td>
<td valign="top" align="center">(&#x2212;0.56, +1.84)</td>
</tr>
<tr>
<td valign="top" align="left">Increment</td>
<td valign="top" align="center">+1.32 (2.17)</td>
<td valign="top" align="center">(&#x2212;0.43, +2.36)</td>
</tr>
<tr>
<td valign="top" align="left">Duration</td>
<td valign="top" align="center">&#x2212;0.17 (1.63)</td>
<td valign="top" align="center">(&#x2212;1.47, +1.35]</td>
</tr>
<tr>
<td valign="top" align="left">Frequency</td>
<td valign="top" align="center">&#x2212;0.24 (1.57)</td>
<td valign="top" align="center">(&#x2212;1.26, +1.15]</td>
</tr>
</tbody>
</table></table-wrap>
<p>The assumption of sphericity for repeated measures was tested using the Mauchly procedure. When the assumption was violated (<italic>p</italic> &#x003C; 0.05), <xref ref-type="bibr" rid="B17">Greenhouse and Geisser (1959)</xref> correction procedures were applied.</p>
</sec>
</sec>
</sec>
<sec id="S3" sec-type="results">
<label>3</label>
<title>Results</title>
<sec id="S3.SS1">
<label>3.1</label>
<title>Performance data</title>
<sec id="S3.SS1.SSS1">
<label>3.1.1</label>
<title>2D WM</title>
<p>Performance was measured in terms of accuracy of detection of the targets (hits) and foils (correct rejections), and speed of responding, reaction time (RT). A one-way ANOVA with repeated measures on Load (1, 2, or 4 items) was computed on these data. As expected, as WM Load increased from 1 to 4 items, the hit rate significantly decreased, <italic>F</italic>(2, 36) = 55.43, <italic>p</italic> &#x003C; 0.001, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.75 and the corresponding RT significantly increased, <italic>F</italic>(2, 36) = 9.02, <italic>p</italic> &#x003C; 0.001, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.33. Similar findings were observed for the correct rejections. As WM Load increased, the correct rejection rate significantly decreased, <italic>F</italic>(2, 36) = 6.05, <italic>p</italic> &#x003C; 0.005, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.25 and RT significantly increased, <italic>F</italic>(2, 36) = 5.54, <italic>p</italic> &#x003C; 0.008, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.24.</p>
</sec>
<sec id="S3.SS1.SSS2">
<label>3.1.2</label>
<title>VR WM</title>
<p>Performance was measured only in terms of accuracy. Response times were very long ranging from 10 to 15 s and consisted of a series of complex cognitive operations including preparation to respond, its initiation and its execution using both arm and hand movements. A one-way ANOVA with repeated measures on WM Load was run on the accuracy data. Accuracy significantly decreased as WM Load increased, <italic>F</italic>(3, 64) = 6.48, <italic>p</italic> &#x003C; 0.001, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.23.</p>
</sec>
</sec>
<sec id="S3.SS2">
<label>3.2</label>
<title>ERP data</title>
<sec id="S3.SS2.SSS1">
<label>3.2.1</label>
<title>Standard ERPs</title>
<p>The DRN and P3a were quantified in a deviant&#x2013;standard difference wave. The use of the difference wave assumes that experimental effects only affected the processing of the deviant. This assumption may not be true. Experimental effects might have affected the difference wave through either differential processing of either the deviant or the standard. The assumption that the processing of the standard was not affected by the visual task demands was therefore tested. The standard ERP waveforms are illustrated in the left-hand portion of <xref ref-type="fig" rid="F2">Figure 2</xref>. The standard elicited a very small amplitude N1 because of the rapid rate of stimulus presentation. The amplitude of the N1 did not differ significantly among the three visual task conditions, <italic>F</italic> &#x003C; 1 at both the frontal and central ROIs. Similarly, the amplitude of the following P2 was not significantly affected by the visual conditions, <italic>F</italic> &#x003C; 1 at both ROIs.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption><p>&#x201C;Raw&#x201D; standard and deviant ERPs (left and middle portions, respectively), and the deviant-standard difference wave (right portion). Negativity at the scalp relative to the nose reference is indicated by a downward deflection in this and all other Figures. The standard elicited only a small amplitude N1 at about 100 ms and a somewhat larger P2 at about 180 ms. Neither the amplitude of N1 nor P2 significantly differed among the three task conditions. A DRN (filled upward arrow) is apparent for all deviants in the deviant-standard difference waveforms. The DRN was not significantly influenced by the different visual tasks. Only the white noise and environmental sound deviants elicited a significant P3a (downward open arrow). Its amplitude was significantly larger in the video condition. A significant P3a was not elicited by the other deviants in any of the tasks.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1654765-g002.tif">
<alt-text content-type="machine-generated">Electroencephalogram (EEG) waveforms compare brain responses across three conditions: Video (black), Virtual Reality (blue), and WM-2D (magenta) to six auditory deviants: White Noise, Environmental, Increment, Decrement, Duration, and Frequency. Columns display Standard, Deviant, and Difference (Deviant minus Standard). N1 and P2 peaks are labeled for Standard. Difference plots show significant deviations, marked by triangle symbols.Black triangle represents the DRN and White triangle represents P3a. Axes indicate voltage ranging from plus four to minus four microvolts and a time scale up to six hundred milliseconds. A color-coded key identifies line types for each condition.</alt-text>
</graphic>
</fig>
</sec>
<sec id="S3.SS2.SSS2">
<label>3.2.2</label>
<title>Deviant ERPs</title>
<sec id="S3.SS2.SSS2.Px1">
<label>3.2.2.1</label>
<title>DRN</title>
<p>The deviant-standard difference waves are illustrated in the right portion of <xref ref-type="fig" rid="F2">Figure 2</xref>. The six deviants elicited a DRN from 100 to 125 ms after stimulus presentation. The DRN was maximum over frontocentral areas and inverted in polarity at the mastoids. In the control condition, when participants were asked to watch a video and ignore the auditory stimuli, a significant DRN was elicited by all six deviant stimuli as determined by confidence interval testing. All six deviants were therefore included in the subsequent ANOVA procedure. Its amplitude varied significantly among the six deviants, <italic>F</italic>(2.8, 51.2) = 5.30, <italic>p</italic> &#x003C; 0.01, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.23 at the frontal ROI. At the central ROI, the effect of the type of deviant failed to reach significance following Greenhouse-Geisser corrections, <italic>F</italic>(2.4, 44.0) = 2.47, <italic>p</italic> &#x003C; 0.09, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.12. Importantly, visual task demands did not however significantly affect the amplitude of the DRN, <italic>F</italic> &#x003C; 1 at both the frontal and central ROIs. The Task &#x00D7; Deviant interaction was also not significant at either the frontal or central ROI, <italic>F</italic>(10, 180) = 1.23, <italic>p</italic> &#x003C; 0.27, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.06 and <italic>F</italic>(10, 180) = 1.42, <italic>p</italic> &#x003C; 0.17, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.07, respectively.</p>
</sec>
<sec id="S3.SS2.SSS2.Px2">
<label>3.2.2.2</label>
<title>P3a</title>
<p>A large positivity, the P3a, occurring between 200 and 225 ms was elicited by the white noise and novel environmental sounds in the control condition. The P3a was maximum over centro-frontal regions of the scalp. Confidence interval testing indicated that this positivity was only significant following presentation of the white noise (<xref ref-type="fig" rid="F3">Figure 3</xref>) and environmental sound (<xref ref-type="fig" rid="F4">Figure 4</xref>) deviants. The positivity was not significant for the remaining four deviants.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption><p>White noise deviant-standard difference wave across frontal, central, and parietal regions. A fronto-central maximum DRN is apparent at about 100&#x2013;120 ms. It inverted in polarity at the mastoids (M1, M2). This peak was followed by a P3a occurring at about 210 ms. At both central and frontal regions, the P3a was significantly larger when participants were asked to watch a video compared to when they were engaged in a VR WM or a 2D WM task.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1654765-g003.tif">
<alt-text content-type="machine-generated">Nine line plots display EEG event-related potential data across scalp positions F3, Fz, F4, C3, Cz, C4, M1, Pz, and M2 under the condition labeled &#x201C;White Noise.&#x201D; Each plot compares three conditions: Video (black line), VR WM (blue line), and 2D WM (magenta line), with axes labeled microvolts and milliseconds. Black triangle represents the DRN and White triangle represents P3a.</alt-text>
</graphic>
</fig>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption><p>Novel environmental sound deviant-standard difference wave across frontal, central, and parietal regions. A fronto-central maximum DRN is apparent at about 120 ms. It inverted in polarity at the mastoids (M1, M2). A P3a is also apparent at about 215 ms. At both central and frontal regions, the P3a was significantly larger when participants were asked to watch a video compared to when they were engaged in a VR WM or a 2D WM task.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1654765-g004.tif">
<alt-text content-type="machine-generated">Line graphs display EEG waveforms from nine electrode sites (F3, Fz, F4, C3, Cz, C4, M1, Pz, M2) labeled along the axes, comparing responses to Environmental Sounds in three conditions: Video (black), VR WM (cyan), and 2D WM (magenta). The y-axis represents microvolts and the x-axis represents time, labeled up to six hundred milliseconds. A legend identifies line colors for each condition.</alt-text>
</graphic>
</fig>
<p>An initial ANOVA was run on all 6 deviants at the central ROI where it was largest. The main effect of deviant type was significant, <italic>F</italic>(5, 90) = 36.62, <italic>p</italic> &#x003C; 0.001, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.68. Holm <italic>post hoc</italic> follow-up procedures indicated that the amplitude of the P3a for the white noise and environmental sound deviants was significantly larger (<italic>p</italic> &#x003C; 0.01 in all comparisons) than for the remaining deviants. The amplitude of the P3a was very small and did not significantly differ (<italic>p</italic> &#x003E; 0.05) among these 4 deviants.</p>
<p>A second ANOVA was therefore computed only for the white noise and environmental sound deviants. The mean values of the central ROI P3a as a function of visual task and type of deviant are illustrated in <xref ref-type="fig" rid="F5">Figure 5</xref>. The main effect of visual task was significant, <italic>F</italic>(2, 36) = 15.75, <italic>p</italic> &#x003C; 0.001, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.47. <italic>Post hoc</italic> testing revealed that the P3a was significantly larger when participants watched the video compared to carrying out either the VR WM task (<italic>p</italic> &#x003C; 0.01) or the 2D WM task (<italic>p</italic> &#x003C; 0.01). The effect tended to be larger for the environmental sound deviant (<xref ref-type="fig" rid="F5">Figure 5</xref>), but the interaction between type of deviant and visual task was not significant, <italic>F</italic>(2, 36) = 2.81, <italic>p</italic> = 0.07, &#x03B7;<sub>p</sub><sup>2</sup> = 0.13. P3a differences between the VR WM and 2D WM tasks were not significant. Similar effects were apparent at the frontal ROI. The main effect of visual task was significant, <italic>F</italic>(2,36) = 11.70, <italic>p</italic> &#x003C; 0.001, &#x03B7;<sub>p</sub><sup>2</sup> = 0.39. Again, <italic>post hoc</italic> testing revealed that the P3a was significantly larger when participants watched the video compared to carrying out either the VR WM task or the 2D WM task (<italic>p</italic> &#x003C; 0.01 in both cases). The Deviant x Task interaction was again not significant, <italic>F</italic>(2, 36) = 2.63, <italic>p</italic> &#x003C; 0.09, &#x03B7;<sub>p</sub><sup>2</sup> = 0.13.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption><p>Mean P3a amplitude averaged across frontal and central ROI sites as a function of visual task for the white noise deviant (left) and the environmental sound deviant (right). Standard Error (SE) bars are also indicated. The P3a was significantly larger when participants were asked to watch a video compared to when they were engaged in a VR or a 2D working memory (WM) task.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1654765-g005.tif">
<alt-text content-type="machine-generated">Four bar graphs compare P3a amplitude (microvolts) for three visual tasks: Video, VR WM, and 2D WM across White Noise and Environmental Sounds, with error bars shown. Significant differences (p less than 0.01) are marked between Video and the other tasks. Top graphs display Frontal ROI results; bottom graphs show Central ROI.</alt-text>
</graphic>
</fig>
</sec>
<sec id="S3.SS2.SSS2.Px3">
<label>3.2.2.3</label>
<title>Scalp distribution maps</title>
<p>Exploratory spline scalp distribution maps (<xref ref-type="bibr" rid="B56">Perrin et al., 1989</xref>) of the P3a were also computed for the white noise and environmental sound deviants in the three visual task conditions (<xref ref-type="fig" rid="F6">Figure 6</xref>). As can be observed, the P3a had a distinct centro-frontal positivity regardless of the type of deviant or the nature of the visual task. Thus, while the visual task did have a significant effect on the amplitude of the P3a, its scalp distribution did not appear to change across the three visual task conditions.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption><p>Spline maps of the P3a following presentation of the white noise and environmental sound deviants. The maps are displayed from the top of a flattened head that extends 20&#x00B0; below the Fp1-T7-Oz-T8-Fp2 circumference to show data from the most inferior electrodes (FT9, FT10, TP9, TP10). Note that the maps are scaled relative to the minimum and maximum amplitudes for each condition and type of deviant. The minima and maxima differ as a function of condition and type of deviant. The maps cannot therefore be used to determine whether the condition and type of deviant influenced the absolute amplitude of the P3a. Rather the maps illustrate the difference in scalp topography. Thus, although the amplitude of the P3a was significantly reduced in the VR WM and 2D WM tasks, its scalp distribution was very similar to those observed when participants watched a video. The P3a was largest over centro-frontal regions of the scalp for the two deviants and in each of the Task conditions.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1654765-g006.tif">
<alt-text content-type="machine-generated">Graphic showing six topographic brain maps comparing P3a event-related potentials across tasks. Columns represent Video, Virtual Reality Working Memory (WM), and 2D WM conditions. Rows represent White Noise and Environmental Sounds. Each map displays activation in time windows from approximately 195 to 260 milliseconds and a voltage scale from minus 4 to plus 8 microvolts.</alt-text>
</graphic>
</fig>
</sec>
</sec>
</sec>
</sec>
<sec id="S4" sec-type="discussion">
<label>4</label>
<title>Discussion</title>
<p>When an individual&#x2019;s attention is focussed on the processing demands of a cognitive task, certain rarely-presented but potentially highly relevant auditory stimuli that are not attended will be involuntarily processed. Such stimuli may cause a switch of attention from the relevant visual task-at-hand to the processing of the unattended auditory input. The salient features of the auditory input have prompted the central executive to shift processing priorities. The switching of attention may come at a cost. The switching of attention from the task-at-hand may result in a deterioration in performance on this task. In the present study, the extent of processing of the unattended auditory stimulus was quantified by recording event-related potentials (ERPs) following the presentation of these stimuli. Participants&#x2019; attention was directed to the demands of different visual tasks. Auditory stimuli were presented concurrently but these were irrelevant to the visual tasks and were to-be-ignored. The auditory sequence consisted of a frequently occurring standard stimuli and six different deviant stimuli. The deviants were created by varying different features of the standard. All deviants were expected to elicit a negativity occurring from 100 to 150 ms, the DRN, reflecting detection of stimulus change. The features of the white noise and environmental sound deviants represented a large extent of change from the standard stimulus and as such, were expected to elicit a later positivity occurring between 200 and 250 ms, the P3a. It is the P3a that is associated with attention capture and a switch of processing resources from the visual task-at-hand and toward the potentially more relevant processing of the auditory channel. Three different visual tasks were employed. The &#x201C;gold standard&#x201D; ERP procedure asks participants to watch a silent sub-titled video while ignoring the auditory sequence. 
This passive video was used as a control condition in the present study. A problem with the use of the video task is that the experimenter has little control over what the participant is actually &#x201C;doing.&#x201D; Participants might, for example, also eavesdrop on the auditory sequence. Two other visual task conditions were therefore also run. Participants were also presented with a WM task within a VR environment and in a third condition, a traditional WM task presented on a 2D monitor.</p>
<p>VR environments are claimed to be immersive, often evoking a distinct sense of presence in the virtual environment and a lack of awareness of events in the real, external environment (<xref ref-type="bibr" rid="B62">Sanchez-Vives and Slater, 2005</xref>). Such presence in a VR environment might thus prevent attention being switched from the relevant visual VR task to the irrelevant auditory channel. In the present study, an actual measure of presence was not taken. <xref ref-type="bibr" rid="B47">Nannipieri (2022)</xref> reports that as many as 40 different types of questionnaires have been administered purporting to measure the extent of perceived &#x201C;presence.&#x201D; Whether these questionnaires are able to measure an abstract concept such as presence has been seriously questioned. There are a number of issues. The questionnaires are typically administered after the session rather than during it and participants are asked to recall what they had experienced. Also, the questionnaires rely obviously on subjective report. Reviews by <xref ref-type="bibr" rid="B36">Marto and Gon&#x00E7;alves (2024)</xref> and <xref ref-type="bibr" rid="B26">Kukshinov et al. (2025)</xref> indicate that the many variants do not measure presence in the same way, and they ask different questions. Kukshinov et al. note, &#x201C;these numerous questionnaires often produce incomparable measurements of presence.&#x201D; A limitation of the present study, however, is that it did not provide a measure of &#x201C;presence&#x201D; that is independent of the physiological measure, the P3a.</p>
<sec id="S4.SS1">
<label>4.1</label>
<title>Performance data</title>
<p>Studies have reported that some participants experience different types of discomfort including motion sickness, nausea and headache within the VR environment. Such discomfort may have a pronounced adverse effect on performance. Such symptoms do vary with the testing procedures used within the VR environment. In a previous study using the same VR WM task, <xref ref-type="bibr" rid="B22">Kamal et al. (2025)</xref> administered the Simulator Sickness Questionnaire (SSQ) (<xref ref-type="bibr" rid="B23">Kennedy et al., 1993</xref>) to younger and older participants, none of whom were involved in the present study. Results indicated that only mild to moderate discomfort was experienced. For this reason, the SSQ was not administered in the present study. Participants were, however, asked if they had experienced discomfort within the VR environment. None reported adverse effects.</p>
<p>Performance on the VR WM and 2D WM tasks replicated many previous studies on the effects of WM load (<xref ref-type="bibr" rid="B10">Cowan, 2001</xref>; <xref ref-type="bibr" rid="B31">Luck and Vogel, 2013</xref>; <xref ref-type="bibr" rid="B48">Oberauer and Hein, 2012</xref>). As the number of objects needed to be maintained in memory (i.e., WM load) increased, accuracy of detection decreased in both versions of the WM task. Thus, in spite of the fact that both the stimulus and response parameters differed markedly between the two WM tasks, the effect of WM load was very similar. Importantly, the behavioral results can be used as evidence that the participant was indeed attentive to and engaged in the visual task-at-hand. On the other hand, the performance data cannot be used as evidence that the participants did not also attend the irrelevant auditory sequence. The ERP data were used to quantify the extent of processing of the unattended auditory stimuli.</p>
</sec>
<sec id="S4.SS2">
<label>4.2</label>
<title>ERP data</title>
<sec id="S4.SS2.SSS1">
<label>4.2.1</label>
<title>DRN</title>
<p>As expected, the DRN was elicited by all deviants, reflecting the automatic detection of acoustic change. This DRN was also observed by <xref ref-type="bibr" rid="B71">Tavakoli and Campbell (2016)</xref> and <xref ref-type="bibr" rid="B37">Morrison et al. (2020)</xref> using a very similar multi-feature paradigm. The amplitude of the DRN was affected by the type of deviant that was presented. The purpose of this study was not to determine why deviant features can affect the amplitude of the DRN. Several studies have now demonstrated that the amplitude of the DRN/MMN varies directly with the extent of stimulus change in both oddball and multi-deviant paradigms (<xref ref-type="bibr" rid="B61">Sams et al., 1985</xref>; <xref ref-type="bibr" rid="B75">Tiitinen et al., 1994</xref>; <xref ref-type="bibr" rid="B51">Pakarinen et al., 2007</xref>; <xref ref-type="bibr" rid="B11">Duda-Milloy et al., 2019</xref>; <xref ref-type="bibr" rid="B19">Honbolyg&#x00F3; et al., 2024</xref>). Moreover, <xref ref-type="bibr" rid="B50">Pakarinen et al. (2009)</xref> and <xref ref-type="bibr" rid="B11">Duda-Milloy et al. (2019)</xref> also used multi-deviant paradigms and observed that the amplitude of the DRN/MMN elicited by different deviants was directly related to behavioral detection rates for these deviants. On the other hand, <xref ref-type="bibr" rid="B71">Tavakoli and Campbell (2016)</xref> using the same deviants as those used in the present study did not observe a relationship between detectability of the deviants and the amplitude of the DRN.</p>
<p>There is some evidence that subtle reductions in the amplitude of the DRN/MMN can occur during exceedingly demanding visual tasks, but these effects are also dependent on the type of deviant (<xref ref-type="bibr" rid="B44">N&#x00E4;&#x00E4;t&#x00E4;nen et al., 1993</xref>; <xref ref-type="bibr" rid="B41">Muller-Gass et al., 2006</xref>). For this reason, several different types of deviants were used in the present study. It is also possible that the various visual tasks used in previous studies were not so demanding to prevent the participant from also sampling the auditory sequence. A VR WM task that was presumed to be highly demanding of cognitive resources was therefore used in the present study. The amplitude of the DRN elicited by the various deviants in the usual watching a video condition did not significantly differ from those elicited when the participant was engaged in a VR WM or a 2D WM task. This finding thus replicates those of many other studies examining the effects of visual task demands. Furthermore, this finding largely replicates other VR studies. <xref ref-type="bibr" rid="B74">Terkildsen and Makransky (2019)</xref> noted that a DRN (that they labeled as the &#x201C;MMN&#x201D;) elicited by a large frequency deviant (1,200 Hz standard, 2,000 Hz deviant) was reduced in those who experienced a large sense of presence within a VR environment compared to those who did not. In the present study, the frequency difference between the standard and deviant was much smaller (1,000 vs. 1,100 Hz). <xref ref-type="bibr" rid="B16">Grassini et al. (2021)</xref> did however also use a 1,200 and 2,000 Hz standard-deviant difference and also failed to observe the influence of VR presence on the elicited DRN. They did note that the Terkildsen and Makransky VR employed a horror-based VR game that may have induced a much stronger emotional reaction, fear, than their roller coaster ride. 
It is thus possible that the DRN is modified only when a highly demanding cognitive task also activates strong emotional reactions, consuming so much of the limited-capacity system that very little is available for the processing of unattended auditory stimulus input.</p>
</sec>
<sec id="S4.SS2.SSS2">
<label>4.2.2</label>
<title>P3a</title>
<p>As expected, when participants watched the video, only certain deviant stimuli elicited a P3a. <xref ref-type="bibr" rid="B71">Tavakoli and Campbell (2016)</xref> and <xref ref-type="bibr" rid="B37">Morrison et al. (2020)</xref> also reported that only the white noise and environmental sound deviants elicited a P3a when a multi-feature paradigm was used. So powerful are the effects of these deviants that they may even elicit a P3a during an unconscious state, natural sleep (<xref ref-type="bibr" rid="B72">Tavakoli et al., 2019</xref>), perhaps alerting the sleeper to the occurrence of potentially highly relevant auditory input. Preventing such attention capture by the occurrence of such highly novel stimuli is therefore an onerous task. Remarkably, in the present study, the ability of these auditory deviants to capture attention and to distract attention away from the task-at-hand was reduced when participants were engaged in a WM task carried out in both the 2D and VR environments. Thus, while the DRN appears to reflect a largely automatic process, detection of acoustic change, this may not be the case for the P3a. The extent to which the occurrence of an unattended but highly novel auditory stimulus will interrupt the central executive&#x2019;s ability to focus attention on a current task-at-hand thus appears to be dependent on the processing demands of this task. The switching of attention to the distracting event is therefore not a fully automatic, bottom-up process. It may also be at least somewhat influenced by top-down processes.</p>
</sec>
<sec id="S4.SS2.SSS3">
<label>4.2.3</label>
<title>Comparing VR and 2D WM tasks</title>
<p>The purpose of this study was to determine if attention capture by an irrelevant auditory stimulus can be reduced by the demands of a visual VR task. The amplitude of the P3a to very novel but unattended auditory stimuli was indeed reduced when participants were engaged in a WM task within the VR environment. Nevertheless, the reduction of the P3a was not unique to the VR condition. A reduction in P3a was also observed during another WM task carried out on a 2D monitor outside of the VR environment. It is therefore not clear if the reduction of the P3a within the VR environment was because of the unique demands of VR itself or a result of engagement in a WM task within this environment. The WM task parameters were quite different in the two conditions. The VR WM task was, of course, much more realistic involving a 3D high resolution head-mounted display. The stimulus and response parameters were also quite different. Thus, the extent to which the unique VR environment as opposed to the use of a WM task was responsible for the reduction of the P3a is very difficult to determine. In this context, <xref ref-type="bibr" rid="B52">Pappalettera et al. (2024)</xref> had participants actively attend to an auditory sequence to detect a rarely occurring target (deviant) stimulus. In different conditions, participants carried out the auditory task outside or within the VR environment. Within the VR environment, participants either were within an office or within an office but also interacting with avatars. The detection of a rare auditory stimulus elicited a P300. The P300 occurs later (around 300 ms) than the P3a and is associated with the active, rather than the passive detection of a rare target stimulus. The auditory P300 in the <xref ref-type="bibr" rid="B52">Pappalettera et al. (2024)</xref> study was reduced in amplitude in the two VR conditions, even though the tasks did not require the use of WM. 
The P3a is usually recorded passively, when participants are not attending to the auditory sequence. Whether VR tasks not making large demands on WM will also modulate the passively-recorded P3a remains to be determined.</p>
<p>The nature of the WM tasks also requires further investigation. When participants are engaged in a <italic>n</italic>-back WM task presented on a 2D computer monitor, previous studies have shown the P3a may be modulated, but the results are inconsistent. In the <italic>n</italic>-back task, the participant is asked to determine whether the current stimulus matches the stimulus presented <italic>n</italic> trials earlier in the sequence. In the <xref ref-type="bibr" rid="B38">Muller-Gass and Schr&#x00F6;ger (2007)</xref> study, a 1-back memory task condition was run in addition to a perceptual task. Participants were asked whether the present short or long duration auditory stimulus was the same duration as the one that had preceded it. The pitch of the frequently occurring standard was at times changed to form a deviant, but the pitch change was irrelevant to the 1-back memory task. The distractor deviant resulted in poorer memory performance. These performance results were similar to those reported by <xref ref-type="bibr" rid="B27">Lavie (2005)</xref>. In addition, a larger P3a was elicited when the participant had to decide whether the duration of the current auditory deviant was also presented in the previous trial (1-back condition) compared to when the participant had to decide about its duration (0-back condition). Thus, the <italic>n</italic>-back task seemed to <italic>enhance</italic> rather than protect against distraction. The effects of a distractor depend on several factors. In <xref ref-type="bibr" rid="B39">Muller-Gass et al. (2007)</xref> study, the auditory distractor occurred within an auditory <italic>n</italic>-back task. Other studies have used a visual <italic>n</italic>-back (<xref ref-type="bibr" rid="B32">Lv et al., 2010</xref>; <xref ref-type="bibr" rid="B63">SanMiguel et al., 2008</xref>). The deviant elicited a P3a that was reduced in amplitude when the <italic>n</italic>-back task was more demanding. 
On the other hand, <xref ref-type="bibr" rid="B34">Mahajan et al. (2020)</xref> did not find that <italic>n</italic>-back task difficulty had a significant effect on the amplitude of the P3a elicited by the auditory deviants. In the <italic>n</italic>-back studies, the presentation of an irrelevant auditory stimulus prior to the relevant visual stimuli is problematic. While the auditory stimuli were irrelevant to the visual <italic>n</italic>-back, they could still be used as a warning signal or as a cue to predict the subsequent occurrence of the visual target (<xref ref-type="bibr" rid="B4">Baragona et al., 2025</xref>; <xref ref-type="bibr" rid="B53">Parmentier, 2014</xref>). Thus, attending to the auditory sequence could improve performance on the visual WM task. As such, in some conditions, differences in the amplitude of the P3a may have been a result of passive compared to active processing of the deviant. Comparing the P3a elicited in different types of WM tasks (delayed match-to-sample versus n-back tasks) is therefore difficult. Compounding the issue is the fact that while processing demands in the two types of WM task may be very different, the placement of the actual auditory distractor is also quite disparate. In the current delayed match-to-sample tasks, the auditory stimuli occurred in the &#x201C;background&#x201D; and provided no information about the visual stimuli. The P3a was therefore elicited passively. In most <italic>n</italic>-back studies, the auditory stimuli are presented immediately prior to the occurrence of the visual stimuli to-be-remembered. They could thus be informative, providing a cue to the imminent occurrence of the task relevant visual stimuli. The P3a might therefore have been recorded actively.</p>
</sec>
</sec>
<sec id="S4.SS3">
<label>4.3</label>
<title>Disentangling VR and 2D WM tasks</title>
<p>The purpose of the present study was to also determine if the amplitude of the P3a could be modulated by task demands. This goal was achieved. Although both the VR and 2D tasks did involve the use of WM, there were wide methodological discrepancies in stimulus and response procedures. Our study was not designed to determine whether VR <italic>per se</italic> or the use of a WM task was responsible for the reduction in the P3a. Future studies could construct a VR WM task with parameters that are much more similar to those of a standard 2D task. However, this might also result in a large reduction of the immersiveness and sense of presence in the VR environment. Determining whether the reduction of the P3a within the VR environment was because of its unusual demands for cognitive resources or because the VR task involved processing associated with WM will require the inclusion of other control conditions. A highly demanding non-WM task could also be used within the VR task for this purpose. It would also be essential to include non-demanding VR conditions. For example, <xref ref-type="bibr" rid="B29">Lier et al. (2020)</xref> had participants watch a video within a VR environment. Painful electric shocks were occasionally delivered but these were irrelevant to the content of the video. The electric shock did elicit a large P3a in a control condition (watching a static image). Its amplitude was, however, much reduced when the participant watched the video.</p>
<p>In the present study, the reduction of the P3a in both WM tasks provided evidence of decreased involuntary attention capture, often associated with distraction. However, conclusions about actual distraction would have required an independent measure of performance on the assigned WM tasks. In many ERP studies, the deterioration in performance as measured by accuracy of responding or response times provides such an independent measure of distraction. A measure of distraction could have been included in the present study by running two other VR WM and 2D WM conditions in which the auditory stimuli were not presented. Presumably, performance on these tasks would have deteriorated when the auditory sequence was presented. These additional tasks would, of course, have increased the duration of an already long testing time. An independent performance measure of distraction by irrelevant auditory stimuli has been included in some <italic>n</italic>-back studies. The auditory stimuli can be synchronized to occur just prior to the presentation of the visual stimuli. Presumably if attention is switched to the auditory stimuli, a P3a should be elicited and performance on the visual <italic>n</italic>-back should deteriorate. However, as mentioned previously, such synchronization may result in a confound: the apparently irrelevant auditory stimuli could now act as a relevant warning signal rather than being a distractor. Moreover, even if these confounds could be overcome (see <xref ref-type="bibr" rid="B78">Wetzel et al., 2013</xref>), synching of the auditory and visual stimuli in a delayed match-to-sample paradigm would remain very problematic. Models of the MMN propose that the MMN reflects the output of a rapidly-fading sensory memory. The deviant must occur before the memory representation for the standard fades. 
The duration of sensory memory has been estimated to last from perhaps 2 to 10 s (<xref ref-type="bibr" rid="B59">Sabri and Campbell, 2001</xref>; <xref ref-type="bibr" rid="B60">Sams et al., 1993</xref>). In the VR WM task, a trial was initiated much more slowly than this.</p>
<p>The influence of task demands on the P3a could also be further explored by examining the influence of WM load. Performance was poorer as WM load increased. Trials could therefore be sorted according to WM load. When the load was high (e.g., 4 items to-be-remembered), it might be expected that the P3a would be reduced compared to when load was low (e.g., 1 item to-be-remembered). Unfortunately, the analysis of the influence of load would probably not permit the use of a multi-feature paradigm. In the present study, a total of 154 of each type of deviant was presented in each condition. Had these been sorted by the four WM loads, then only about 40 trials would have been available for averaging. This low number would have been insufficient to permit reduction of the background EEG noise to allow the ERP signal to emerge.</p>
</sec>
</sec>
</body>
<back>
<sec id="S5" sec-type="data-availability">
<title>Data availability statement</title>
<p>The datasets presented in this article are not readily available because we do not have ethical approval to share data. Requests to access the datasets should be directed to <email xlink:href="mailto:cassandramorrison@cunet.carleton.ca">cassandramorrison@cunet.carleton.ca</email>.</p>
</sec>
<sec id="S6" sec-type="ethics-statement">
<title>Ethics statement</title>
<p>The studies involving humans were approved by Carleton University and the National Research Council of Canada. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided written informed consent to participate in this study.</p>
</sec>
<sec id="S7" sec-type="author-contributions">
<title>Author contributions</title>
<p>FK: Investigation, Writing &#x2013; review &#x0026; editing, Conceptualization, Methodology, Formal analysis, Writing &#x2013; original draft, Visualization. NC: Visualization, Funding acquisition, Methodology, Writing &#x2013; review &#x0026; editing, Conceptualization, Software. AD: Writing &#x2013; review &#x0026; editing, Investigation, Data curation. DS: Formal analysis, Data curation, Investigation, Writing &#x2013; review &#x0026; editing. KC: Investigation, Software, Writing &#x2013; review &#x0026; editing, Visualization, Formal analysis, Writing &#x2013; original draft, Methodology, Conceptualization. CM: Project administration, Visualization, Data curation, Investigation, Conceptualization, Writing &#x2013; review &#x0026; editing, Methodology, Funding acquisition, Supervision.</p>
</sec>
<sec id="S9" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="S10" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec id="S11" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Al Boustani</surname> <given-names>S.</given-names></name> <name><surname>de Diego</surname> <given-names>A. V.</given-names></name> <name><surname>Argelaguet</surname> <given-names>F.</given-names></name> <name><surname>Multon</surname> <given-names>F.</given-names></name></person-group> (<year>2022</year>). <article-title>Effects of virtual reality immersion on cognitive and physiological responses to auditory stimuli.</article-title> <source><italic>Front. Virtual Real.</italic></source> <volume>3</volume>:<fpage>897898</fpage>. <pub-id pub-id-type="doi">10.3389/frvir.2022.897898</pub-id></mixed-citation></ref>
<ref id="B2"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Alho</surname> <given-names>K.</given-names></name> <name><surname>Woods</surname> <given-names>D. L.</given-names></name> <name><surname>Algazi</surname> <given-names>A.</given-names></name> <name><surname>N&#x00E4;&#x00E4;t&#x00E4;nen</surname> <given-names>R.</given-names></name></person-group> (<year>1992</year>). <article-title>Intermodal selective attention. II. Effects of attentional load on processing of auditory and visual stimuli in central space.</article-title> <source><italic>Electroencephalogr. Clin. Neurophysiol.</italic></source> <volume>82</volume> <fpage>356</fpage>&#x2013;<lpage>368</lpage>. <pub-id pub-id-type="doi">10.1016/0013-4694(92)90005-3</pub-id> <pub-id pub-id-type="pmid">1374704</pub-id></mixed-citation></ref>
<ref id="B3"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Baddeley</surname> <given-names>A. D.</given-names></name> <name><surname>Hitch</surname> <given-names>G. J.</given-names></name></person-group> (<year>1974</year>). <article-title>&#x201C;Working memory,&#x201D; in</article-title> <source><italic>Recent Advances in Learning and Motivation</italic></source>, <volume>Vol. 47</volume> <role>ed.</role> <person-group person-group-type="editor"><name><surname>Bower</surname> <given-names>G.</given-names></name></person-group> (<publisher-loc>New York</publisher-loc>: <publisher-name>Academic Press</publisher-name>), <fpage>47</fpage>&#x2013;<lpage>89</lpage>. <pub-id pub-id-type="doi">10.1016/s0079-7421(08)60452-1</pub-id></mixed-citation></ref>
<ref id="B4"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Baragona</surname> <given-names>V.</given-names></name> <name><surname>Schr&#x00F6;ger</surname> <given-names>E.</given-names></name> <name><surname>Widmann</surname> <given-names>A.</given-names></name></person-group> (<year>2025</year>). <article-title>Salient, unexpected omissions of sounds can involuntarily distract attention.</article-title> <source><italic>J. Cogn. Neurosci.</italic></source> <volume>37</volume> <fpage>1291</fpage>&#x2013;<lpage>1307</lpage>. <pub-id pub-id-type="doi">10.1162/jocn_a_02307</pub-id> <pub-id pub-id-type="pmid">39918914</pub-id></mixed-citation></ref>
<ref id="B5"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Berti</surname> <given-names>S.</given-names></name> <name><surname>Schr&#x00F6;ger</surname> <given-names>E.</given-names></name></person-group> (<year>2003</year>). <article-title>Working memory controls involuntary attention switching: Evidence from an auditory distraction paradigm.</article-title> <source><italic>Eur. J. Neurosci.</italic></source> <volume>17</volume> <fpage>1119</fpage>&#x2013;<lpage>1122</lpage>. <pub-id pub-id-type="doi">10.1046/j.1460-9568.2003.02527.x</pub-id> <pub-id pub-id-type="pmid">12653989</pub-id></mixed-citation></ref>
<ref id="B6"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Burns</surname> <given-names>C. G.</given-names></name> <name><surname>Fairclough</surname> <given-names>S. H.</given-names></name></person-group> (<year>2015</year>). <article-title>Use of auditory event-related potentials to measure immersion during a computer game.</article-title> <source><italic>Int. J. Hum. Comput. Stud.</italic></source> <volume>73</volume> <fpage>107</fpage>&#x2013;<lpage>114</lpage>. <pub-id pub-id-type="doi">10.1016/j.ijhcs.2014.09.001</pub-id></mixed-citation></ref>
<ref id="B7"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>F.</given-names></name> <name><surname>Zhou</surname> <given-names>J.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Yu</surname> <given-names>K.</given-names></name> <name><surname>Arshad</surname> <given-names>S. Z.</given-names></name> <name><surname>Khawaji</surname> <given-names>A.</given-names></name><etal/></person-group> (<year>2014</year>). <source><italic>Robust Multimodal Cognitive Load Measurement.</italic></source> <publisher-loc>Berlin</publisher-loc>: <publisher-name>Springer</publisher-name>, <pub-id pub-id-type="doi">10.1007/978-3-319-07790-1_25</pub-id></mixed-citation></ref>
<ref id="B8"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Climent</surname> <given-names>G.</given-names></name> <name><surname>Rodr&#x00ED;guez</surname> <given-names>C.</given-names></name> <name><surname>Garc&#x00ED;a</surname> <given-names>T.</given-names></name> <name><surname>Areces</surname> <given-names>D.</given-names></name> <name><surname>Mej&#x00ED;as</surname> <given-names>M.</given-names></name> <name><surname>Aierbe</surname> <given-names>A.</given-names></name><etal/></person-group> (<year>2021</year>). <article-title>New virtual reality tool (Nesplora Aquarium) for assessing attention and working memory in adults: A normative study.</article-title> <source><italic>Appl. Neuropsychol. Adult</italic>.</source> <volume>28</volume> <fpage>403</fpage>&#x2013;<lpage>415</lpage>. <pub-id pub-id-type="doi">10.1080/23279095.2019.1646745</pub-id> <pub-id pub-id-type="pmid">31382773</pub-id></mixed-citation></ref>
<ref id="B9"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Cowan</surname> <given-names>N.</given-names></name></person-group> (<year>1995</year>). <source><italic>Attention and Memory: An Integrated Framework.</italic></source> <publisher-loc>New York</publisher-loc>: <publisher-name>Oxford University Press</publisher-name>.</mixed-citation></ref>
<ref id="B10"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cowan</surname> <given-names>N.</given-names></name></person-group> (<year>2001</year>). <article-title>The magical number 4 in short-term memory: A reconsideration of mental storage capacity.</article-title> <source><italic>Behav. Brain Sci.</italic></source> <volume>24</volume> <fpage>87</fpage>&#x2013;<lpage>114</lpage>. <pub-id pub-id-type="doi">10.1017/S0140525X01003922</pub-id> <pub-id pub-id-type="pmid">11515286</pub-id></mixed-citation></ref>
<ref id="B11"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Duda-Milloy</surname> <given-names>V.</given-names></name> <name><surname>Tavakoli</surname> <given-names>P.</given-names></name> <name><surname>Campbell</surname> <given-names>K.</given-names></name> <name><surname>Benoit</surname> <given-names>D. L.</given-names></name> <name><surname>Koravand</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>A time-efficient multi-deviant paradigm to determine the effects of gap duration on the mismatch negativity.</article-title> <source><italic>Hear. Res.</italic></source> <volume>377</volume> <fpage>34</fpage>&#x2013;<lpage>43</lpage>. <pub-id pub-id-type="doi">10.1016/j.heares.2019.03.004</pub-id> <pub-id pub-id-type="pmid">30901627</pub-id></mixed-citation></ref>
<ref id="B12"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Escera</surname> <given-names>C.</given-names></name> <name><surname>Alho</surname> <given-names>K.</given-names></name> <name><surname>Schr&#x00F6;ger</surname> <given-names>E.</given-names></name> <name><surname>Winkler</surname> <given-names>I.</given-names></name></person-group> (<year>1998</year>). <article-title>Involuntary attention and distractibility as evaluated with event-related brain potentials.</article-title> <source><italic>Audiol. Neurootol.</italic></source> <volume>3</volume> <fpage>151</fpage>&#x2013;<lpage>166</lpage>. <pub-id pub-id-type="doi">10.1159/000013789</pub-id> <pub-id pub-id-type="pmid">9575384</pub-id></mixed-citation></ref>
<ref id="B13"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Escera</surname> <given-names>C.</given-names></name> <name><surname>Alho</surname> <given-names>K.</given-names></name> <name><surname>Winkler</surname> <given-names>I.</given-names></name> <name><surname>N&#x00E4;&#x00E4;t&#x00E4;nen</surname> <given-names>R.</given-names></name></person-group> (<year>2000</year>). <article-title>Neural mechanisms of involuntary attention to acoustic novelty and change.</article-title> <source><italic>J. Cogn. Neurosci.</italic></source> <volume>12</volume> <fpage>920</fpage>&#x2013;<lpage>936</lpage>. <pub-id pub-id-type="doi">10.1162/089892998562997</pub-id> <pub-id pub-id-type="pmid">9802992</pub-id></mixed-citation></ref>
<ref id="B14"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fabiani</surname> <given-names>M.</given-names></name> <name><surname>Kazmerski</surname> <given-names>V. A.</given-names></name> <name><surname>Cycowicz</surname> <given-names>Y. M.</given-names></name> <name><surname>Friedman</surname> <given-names>D.</given-names></name></person-group> (<year>1996</year>). <article-title>Naming norms for brief environmental sounds: Effects of age and dementia.</article-title> <source><italic>Psychophysiology</italic></source> <volume>33</volume> <fpage>462</fpage>&#x2013;<lpage>475</lpage>. <pub-id pub-id-type="doi">10.1111/j.1469-8986.1996.tb01072.x</pub-id> <pub-id pub-id-type="pmid">8753947</pub-id></mixed-citation></ref>
<ref id="B15"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Forster</surname> <given-names>S.</given-names></name> <name><surname>Lavie</surname> <given-names>N.</given-names></name></person-group> (<year>2008</year>). <article-title>Failures to ignore entirely irrelevant distractors: The role of load.</article-title> <source><italic>J. Exp. Psychol. Appl.</italic></source> <volume>14</volume> <fpage>73</fpage>&#x2013;<lpage>83</lpage>. <pub-id pub-id-type="doi">10.1037/1076-898X.14.1.73</pub-id> <pub-id pub-id-type="pmid">18377168</pub-id></mixed-citation></ref>
<ref id="B16"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Grassini</surname> <given-names>S.</given-names></name> <name><surname>Holm</surname> <given-names>S. K.</given-names></name> <name><surname>Railo</surname> <given-names>H.</given-names></name> <name><surname>Koivisto</surname> <given-names>M.</given-names></name></person-group> (<year>2021</year>). <article-title>Auditory mismatch negativity in virtual reality: An investigation of immersive VR&#x2019;s effects on automatic auditory processing.</article-title> <source><italic>Int. J. Psychophysiol.</italic></source> <volume>164</volume> <fpage>21</fpage>&#x2013;<lpage>28</lpage>. <pub-id pub-id-type="doi">10.1016/j.ijpsycho.2021.02.010</pub-id> <pub-id pub-id-type="pmid">33582219</pub-id></mixed-citation></ref>
<ref id="B17"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Greenhouse</surname> <given-names>S. W.</given-names></name> <name><surname>Geisser</surname> <given-names>S.</given-names></name></person-group> (<year>1959</year>). <article-title>On methods in the analysis of profile data.</article-title> <source><italic>Psychometrika</italic></source> <volume>24</volume> <fpage>95</fpage>&#x2013;<lpage>112</lpage>. <pub-id pub-id-type="doi">10.1007/BF02289823</pub-id></mixed-citation></ref>
<ref id="B18"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hadjiat</surname> <given-names>Y.</given-names></name> <name><surname>Marchand</surname> <given-names>S.</given-names></name></person-group> (<year>2022</year>). <article-title>Virtual reality and the mediation of acute and chronic pain in adult and pediatric populations: Research developments.</article-title> <source><italic>Front. Pain Res.</italic></source> <volume>3</volume>:<fpage>840921</fpage>. <pub-id pub-id-type="doi">10.3389/fpain.2022.840921</pub-id> <pub-id pub-id-type="pmid">35599969</pub-id></mixed-citation></ref>
<ref id="B19"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Honbolyg&#x00F3;</surname> <given-names>F.</given-names></name> <name><surname>Zulauf</surname> <given-names>B.</given-names></name> <name><surname>Zavogianni</surname> <given-names>M. I.</given-names></name> <name><surname>Cs&#x00E9;pe</surname> <given-names>V.</given-names></name></person-group> (<year>2024</year>). <article-title>Investigating the neurocognitive background of speech perception with a fast multi-feature MMN paradigm.</article-title> <source><italic>Biol. Futura</italic></source> <volume>75</volume> <fpage>145</fpage>&#x2013;<lpage>158</lpage>. <pub-id pub-id-type="doi">10.1007/s42977-024-00219-1</pub-id> <pub-id pub-id-type="pmid">38805154</pub-id></mixed-citation></ref>
<ref id="B20"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hsu</surname> <given-names>Y. F.</given-names></name> <name><surname>Tu</surname> <given-names>C. A.</given-names></name> <name><surname>Chen</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>H.</given-names></name></person-group> (<year>2023</year>). <article-title>The mismatch negativity to abstract relationship of tone pairs is independent of attention.</article-title> <source><italic>Sci. Rep.</italic></source> <volume>13</volume>:<fpage>9839</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-023-37131-y</pub-id> <pub-id pub-id-type="pmid">37330612</pub-id></mixed-citation></ref>
<ref id="B21"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>James</surname> <given-names>W.</given-names></name></person-group> (<year>1890</year>). <article-title>The perception of reality.</article-title> <source><italic>Principles Psychol.</italic></source> <volume>2</volume> <fpage>283</fpage>&#x2013;<lpage>324</lpage>. <pub-id pub-id-type="doi">10.1037/11059-005</pub-id></mixed-citation></ref>
<ref id="B22"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kamal</surname> <given-names>F.</given-names></name> <name><surname>Segado</surname> <given-names>M.</given-names></name> <name><surname>Shaigetz</surname> <given-names>V. G.</given-names></name> <name><surname>Perron</surname> <given-names>M.</given-names></name> <name><surname>Lau</surname> <given-names>B.</given-names></name> <name><surname>Alain</surname> <given-names>C.</given-names></name><etal/></person-group> (<year>2025</year>). <article-title>Exploring working memory across aging using virtual reality.</article-title> <source><italic>Virtual Reality</italic></source> <volume>29</volume>:<fpage>115</fpage>. <pub-id pub-id-type="doi">10.1007/s10055-025-01191-4</pub-id></mixed-citation></ref>
<ref id="B23"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kennedy</surname> <given-names>R. S.</given-names></name> <name><surname>Lane</surname> <given-names>N. E.</given-names></name> <name><surname>Berbaum</surname> <given-names>K. S.</given-names></name> <name><surname>Lilienthal</surname> <given-names>M. G.</given-names></name></person-group> (<year>1993</year>). <article-title>Simulator sickness questionnaire: An enhanced method for quantifying simulator sickness.</article-title> <source><italic>Int. J. Aviation Psychol.</italic></source> <volume>3</volume> <fpage>203</fpage>&#x2013;<lpage>220</lpage>. <pub-id pub-id-type="doi">10.1207/s15327108ijap0303_3</pub-id></mixed-citation></ref>
<ref id="B24"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Klotzsche</surname> <given-names>F.</given-names></name> <name><surname>Gaebler</surname> <given-names>M.</given-names></name> <name><surname>Villringer</surname> <given-names>A.</given-names></name> <name><surname>Sommer</surname> <given-names>W.</given-names></name> <name><surname>Nikulin</surname> <given-names>V.</given-names></name> <name><surname>Ohl</surname> <given-names>S.</given-names></name></person-group> (<year>2023</year>). <article-title>Visual short-term memory-related EEG components in a virtual reality setup.</article-title> <source><italic>Psychophysiology</italic></source> <volume>60</volume>:<fpage>e14378</fpage>. <pub-id pub-id-type="doi">10.1111/psyp.14378</pub-id> <pub-id pub-id-type="pmid">37393581</pub-id></mixed-citation></ref>
<ref id="B25"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kober</surname> <given-names>S. E.</given-names></name> <name><surname>Neuper</surname> <given-names>C.</given-names></name></person-group> (<year>2012</year>). <article-title>Using auditory event-related EEG potentials to assess presence in virtual reality.</article-title> <source><italic>Int. J. Hum. Comput. Stud.</italic></source> <volume>70</volume> <fpage>577</fpage>&#x2013;<lpage>587</lpage>. <pub-id pub-id-type="doi">10.1016/j.ijhcs.2012.04.002</pub-id></mixed-citation></ref>
<ref id="B26"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kukshinov</surname> <given-names>E.</given-names></name> <name><surname>Tu</surname> <given-names>J.</given-names></name> <name><surname>Szita</surname> <given-names>K.</given-names></name> <name><surname>Senthil Nathan</surname> <given-names>K.</given-names></name> <name><surname>Nacke</surname> <given-names>L. E.</given-names></name></person-group> (<year>2025</year>). <article-title>Widespread yet unreliable: A systematic analysis of the use of presence questionnaires.</article-title> <source><italic>Interacting Comput.</italic></source> <volume>11</volume>:<fpage>iwae064</fpage>. <pub-id pub-id-type="doi">10.1093/iwc/iwae064</pub-id></mixed-citation></ref>
<ref id="B27"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lavie</surname> <given-names>N.</given-names></name></person-group> (<year>2005</year>). <article-title>Distracted and confused? Selective attention under load.</article-title> <source><italic>Trends Cogn. Sci.</italic></source> <volume>9</volume> <fpage>75</fpage>&#x2013;<lpage>82</lpage>. <pub-id pub-id-type="doi">10.1016/j.tics.2004.12.004</pub-id> <pub-id pub-id-type="pmid">15668100</pub-id></mixed-citation></ref>
<ref id="B28"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lavie</surname> <given-names>N.</given-names></name></person-group> (<year>2010</year>). <article-title>Attention, distraction, and cognitive control under load.</article-title> <source><italic>Curr. Dir. Psychol. Sci.</italic></source> <volume>19</volume> <fpage>143</fpage>&#x2013;<lpage>148</lpage>. <pub-id pub-id-type="doi">10.1177/0963721410370295</pub-id> <pub-id pub-id-type="pmid">26728138</pub-id></mixed-citation></ref>
<ref id="B29"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lier</surname> <given-names>E. J.</given-names></name> <name><surname>Oosterman</surname> <given-names>J. M.</given-names></name> <name><surname>Assmann</surname> <given-names>R.</given-names></name> <name><surname>de Vries</surname> <given-names>M.</given-names></name></person-group> (<year>2020</year>). <article-title>The effect of virtual reality on evoked potentials following painful electrical stimuli and subjective pain.</article-title> <source><italic>Sci. Rep.</italic></source> <volume>10</volume>:<fpage>9067</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-020-66035-4</pub-id> <pub-id pub-id-type="pmid">32494060</pub-id></mixed-citation></ref>
<ref id="B30"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Luck</surname> <given-names>S. J.</given-names></name></person-group> (<year>2014</year>). <source><italic>An Introduction to The Event-Related Potential Technique.</italic></source> <publisher-loc>Cambridge, MA</publisher-loc>: <publisher-name>MIT Press</publisher-name>.</mixed-citation></ref>
<ref id="B31"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Luck</surname> <given-names>S. J.</given-names></name> <name><surname>Vogel</surname> <given-names>E. K.</given-names></name></person-group> (<year>2013</year>). <article-title>Visual working memory capacity: From psychophysics and neurobiology to individual differences.</article-title> <source><italic>Trends Cogn. Sci.</italic></source> <volume>17</volume> <fpage>391</fpage>&#x2013;<lpage>400</lpage>. <pub-id pub-id-type="doi">10.1016/j.tics.2013.06.006</pub-id> <pub-id pub-id-type="pmid">23850263</pub-id></mixed-citation></ref>
<ref id="B32"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lv</surname> <given-names>J. Y.</given-names></name> <name><surname>Wang</surname> <given-names>T.</given-names></name> <name><surname>Qiu</surname> <given-names>J.</given-names></name> <name><surname>Feng</surname> <given-names>S. H.</given-names></name> <name><surname>Tu</surname> <given-names>S.</given-names></name> <name><surname>Wei</surname> <given-names>D. T.</given-names></name></person-group> (<year>2010</year>). <article-title>The electrophysiological effect of working memory load on involuntary attention in an auditory&#x2013;visual distraction paradigm: An ERP study.</article-title> <source><italic>Exp. Brain Res.</italic></source> <volume>205</volume> <fpage>81</fpage>&#x2013;<lpage>86</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-010-2360-x</pub-id> <pub-id pub-id-type="pmid">20628735</pub-id></mixed-citation></ref>
<ref id="B33"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Macdonald</surname> <given-names>M.</given-names></name> <name><surname>Campbell</surname> <given-names>K.</given-names></name></person-group> (<year>2013</year>). <article-title>Event-related potential measures of a violation of an expected increase and decrease in intensity.</article-title> <source><italic>PLoS One</italic></source> <volume>8</volume>:<fpage>e0076897</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0076897</pub-id> <pub-id pub-id-type="pmid">24143195</pub-id></mixed-citation></ref>
<ref id="B34"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mahajan</surname> <given-names>Y.</given-names></name> <name><surname>Kim</surname> <given-names>J.</given-names></name> <name><surname>Davis</surname> <given-names>C.</given-names></name></person-group> (<year>2020</year>). <article-title>Does working memory protect against auditory distraction in older adults?</article-title> <source><italic>BMC Geriatr.</italic></source> <volume>20</volume>:<fpage>515</fpage>. <pub-id pub-id-type="doi">10.1186/s12877-020-01909-w</pub-id> <pub-id pub-id-type="pmid">33256631</pub-id></mixed-citation></ref>
<ref id="B35"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Makeig</surname> <given-names>S.</given-names></name> <name><surname>Bell</surname> <given-names>A. J.</given-names></name> <name><surname>Jung</surname> <given-names>T.-P.</given-names></name> <name><surname>Sejnowski</surname> <given-names>T. J.</given-names></name></person-group> (<year>1996</year>). <article-title>Independent component analysis of electroencephalographic data.</article-title> <source><italic>Adv. Neural Inf. Process. Syst.</italic></source> <volume>8</volume> <fpage>145</fpage>&#x2013;<lpage>151</lpage>.</mixed-citation></ref>
<ref id="B36"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Marto</surname> <given-names>A.</given-names></name> <name><surname>Gon&#x00E7;alves</surname> <given-names>A.</given-names></name></person-group> (<year>2024</year>). <article-title>A scope of presence-related feelings in AR studies.</article-title> <source><italic>Virtual Reality</italic></source> <volume>28</volume>:<fpage>18</fpage>. <pub-id pub-id-type="doi">10.1007/s10055-023-00908-7</pub-id></mixed-citation></ref>
<ref id="B37"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Morrison</surname> <given-names>C.</given-names></name> <name><surname>Kamal</surname> <given-names>F.</given-names></name> <name><surname>Campbell</surname> <given-names>K.</given-names></name> <name><surname>Taler</surname> <given-names>V.</given-names></name></person-group> (<year>2020</year>). <article-title>The influence of different types of auditory change on processes associated with the switching of attention in younger and older adults.</article-title> <source><italic>Neurobiol. Aging</italic></source> <volume>96</volume> <fpage>197</fpage>&#x2013;<lpage>204</lpage>. <pub-id pub-id-type="doi">10.1016/j.neurobiolaging.2020.09.012</pub-id> <pub-id pub-id-type="pmid">33035845</pub-id></mixed-citation></ref>
<ref id="B38"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Muller-Gass</surname> <given-names>A.</given-names></name> <name><surname>Schr&#x00F6;ger</surname> <given-names>E.</given-names></name></person-group> (<year>2007</year>). <article-title>Perceptual and cognitive task difficulty effects on auditory distraction.</article-title> <source><italic>Neurosci. Lett.</italic></source> <volume>426</volume> <fpage>23</fpage>&#x2013;<lpage>28</lpage>. <pub-id pub-id-type="doi">10.1016/j.neulet.2007.08.049</pub-id> <pub-id pub-id-type="pmid">17897782</pub-id></mixed-citation></ref>
<ref id="B39"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Muller-Gass</surname> <given-names>A.</given-names></name> <name><surname>Macdonald</surname> <given-names>M.</given-names></name> <name><surname>Schr&#x00F6;ger</surname> <given-names>E.</given-names></name> <name><surname>Sculthorpe</surname> <given-names>L.</given-names></name> <name><surname>Campbell</surname> <given-names>K.</given-names></name></person-group> (<year>2007</year>). <article-title>Evidence for the auditory P3a reflecting an automatic process: Elicitation during highly-focused continuous visual attention.</article-title> <source><italic>Brain Res.</italic></source> <volume>1170</volume> <fpage>71</fpage>&#x2013;<lpage>78</lpage>. <pub-id pub-id-type="doi">10.1016/j.brainres.2007.07.023</pub-id> <pub-id pub-id-type="pmid">17692834</pub-id></mixed-citation></ref>
<ref id="B40"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Muller-Gass</surname> <given-names>A.</given-names></name> <name><surname>Stelmack</surname> <given-names>R. M.</given-names></name> <name><surname>Campbell</surname> <given-names>K.</given-names></name></person-group> (<year>2005</year>). <article-title>&#x201C;&#x2026;and were instructed to read a self-selected book while ignoring the auditory stimuli&#x201D;: The effects of task demands on the mismatch negativity.</article-title> <source><italic>Clin. Neurophysiol.</italic></source> <volume>116</volume> <fpage>2142</fpage>&#x2013;<lpage>2152</lpage>. <pub-id pub-id-type="doi">10.1016/j.clinph.2005.05.012</pub-id> <pub-id pub-id-type="pmid">16029961</pub-id></mixed-citation></ref>
<ref id="B41"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Muller-Gass</surname> <given-names>A.</given-names></name> <name><surname>Stelmack</surname> <given-names>R. M.</given-names></name> <name><surname>Campbell</surname> <given-names>K. B.</given-names></name></person-group> (<year>2006</year>). <article-title>The effect of visual task difficulty and attentional direction on the detection of acoustic change as indexed by the mismatch negativity.</article-title> <source><italic>Brain Res. 2006</italic></source> <volume>1078</volume> <fpage>112</fpage>&#x2013;<lpage>130</lpage>. <pub-id pub-id-type="doi">10.1016/j.brainres.2005.12.125</pub-id> <pub-id pub-id-type="pmid">16497283</pub-id></mixed-citation></ref>
<ref id="B42"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>N&#x00E4;&#x00E4;t&#x00E4;nen</surname> <given-names>R.</given-names></name></person-group> (<year>1990</year>). <article-title>The role of attention in auditory information processing as revealed by event-related potentials and other brain measures of cognitive function.</article-title> <source><italic>Behav. Brain Sci.</italic></source> <volume>13</volume> <fpage>201</fpage>&#x2013;<lpage>233</lpage>. <pub-id pub-id-type="doi">10.1017/S0140525X00078407</pub-id></mixed-citation></ref>
<ref id="B43"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>N&#x00E4;&#x00E4;t&#x00E4;nen</surname> <given-names>R.</given-names></name> <name><surname>Kujala</surname> <given-names>T.</given-names></name> <name><surname>Winkler</surname> <given-names>I.</given-names></name></person-group> (<year>2011</year>). <article-title>Auditory processing that leads to conscious perception: A unique window to central auditory processing opened by the mismatch negativity and related responses.</article-title> <source><italic>Psychophysiology</italic></source> <volume>48</volume> <fpage>4</fpage>&#x2013;<lpage>22</lpage>. <pub-id pub-id-type="doi">10.1111/j.1469-8986.2010.01114.x</pub-id> <pub-id pub-id-type="pmid">20880261</pub-id></mixed-citation></ref>
<ref id="B44"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>N&#x00E4;&#x00E4;t&#x00E4;nen</surname> <given-names>R.</given-names></name> <name><surname>Paavilainen</surname> <given-names>P.</given-names></name> <name><surname>Tiitinen</surname> <given-names>H.</given-names></name> <name><surname>Jiang</surname> <given-names>D.</given-names></name> <name><surname>Alho</surname> <given-names>K.</given-names></name></person-group> (<year>1993</year>). <article-title>Attention and mismatch negativity.</article-title> <source><italic>Psychophysiology</italic></source> <volume>30</volume> <fpage>436</fpage>&#x2013;<lpage>450</lpage>. <pub-id pub-id-type="doi">10.1111/j.1469-8986.1993.tb02067.x</pub-id> <pub-id pub-id-type="pmid">8416070</pub-id></mixed-citation></ref>
<ref id="B45"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>N&#x00E4;&#x00E4;t&#x00E4;nen</surname> <given-names>R.</given-names></name> <name><surname>Pakarinen</surname> <given-names>S.</given-names></name> <name><surname>Rinne</surname> <given-names>T.</given-names></name> <name><surname>Takegata</surname> <given-names>R.</given-names></name></person-group> (<year>2004</year>). <article-title>The mismatch negativity (MMN): Towards the optimal paradigm.</article-title> <source><italic>Clin. Neurophysiol.</italic></source> <volume>115</volume> <fpage>140</fpage>&#x2013;<lpage>144</lpage>. <pub-id pub-id-type="doi">10.1016/j.clinph.2003.04.001</pub-id> <pub-id pub-id-type="pmid">14706481</pub-id></mixed-citation></ref>
<ref id="B46"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nagamine</surname> <given-names>T.</given-names></name></person-group> (<year>2025</year>). <article-title>Challenges in using virtual reality technology for pain relief.</article-title> <source><italic>World J. Clin. Cases</italic></source> <volume>13</volume>:<fpage>103372</fpage>. <pub-id pub-id-type="doi">10.12998/wjcc.v13.i16.103372</pub-id> <pub-id pub-id-type="pmid">40487545</pub-id></mixed-citation></ref>
<ref id="B47"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nannipieri</surname> <given-names>O.</given-names></name></person-group> (<year>2022</year>). <article-title>Do presence questionnaires actually measure presence? A content analysis of presence measurement scales.</article-title> <source><italic>Extend. Real.</italic></source> <volume>13445</volume> <fpage>273</fpage>&#x2013;<lpage>295</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-031-15546-8_24</pub-id></mixed-citation></ref>
<ref id="B48"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Oberauer</surname> <given-names>K.</given-names></name> <name><surname>Hein</surname> <given-names>L.</given-names></name></person-group> (<year>2012</year>). <article-title>Attention to information in working memory.</article-title> <source><italic>Curr. Dir. Psychol. Sci.</italic></source> <volume>21</volume> <fpage>164</fpage>&#x2013;<lpage>169</lpage>. <pub-id pub-id-type="doi">10.1177/0963721412444727</pub-id></mixed-citation></ref>
<ref id="B49"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Paavilainen</surname> <given-names>P.</given-names></name></person-group> (<year>2013</year>). <article-title>The mismatch-negativity (MMN) component of the auditory event-related potential to violations of abstract regularities: A review.</article-title> <source><italic>Int. J. Psychophysiol.</italic></source> <volume>88</volume> <fpage>109</fpage>&#x2013;<lpage>123</lpage>. <pub-id pub-id-type="doi">10.1016/j.ijpsycho.2013.03.015</pub-id> <pub-id pub-id-type="pmid">23542165</pub-id></mixed-citation></ref>
<ref id="B50"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pakarinen</surname> <given-names>S.</given-names></name> <name><surname>Lovio</surname> <given-names>R.</given-names></name> <name><surname>Huotilainen</surname> <given-names>M.</given-names></name> <name><surname>Alku</surname> <given-names>P.</given-names></name> <name><surname>N&#x00E4;&#x00E4;t&#x00E4;nen</surname> <given-names>R.</given-names></name> <name><surname>Kujala</surname> <given-names>T.</given-names></name></person-group> (<year>2009</year>). <article-title>Fast multi-feature paradigm for recording several mismatch negativities (MMNs) to phonetic and acoustic changes in speech sounds.</article-title> <source><italic>Biol Psychol.</italic></source> <volume>82</volume> <fpage>219</fpage>&#x2013;<lpage>226</lpage>. <pub-id pub-id-type="doi">10.1016/j.biopsycho.2009.07.008</pub-id> <pub-id pub-id-type="pmid">19646504</pub-id></mixed-citation></ref>
<ref id="B51"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pakarinen</surname> <given-names>S.</given-names></name> <name><surname>Takegata</surname> <given-names>R.</given-names></name> <name><surname>Rinne</surname> <given-names>T.</given-names></name> <name><surname>Huotilainen</surname> <given-names>M.</given-names></name> <name><surname>N&#x00E4;&#x00E4;t&#x00E4;nen</surname> <given-names>R.</given-names></name></person-group> (<year>2007</year>). <article-title>Measurement of extensive auditory discrimination profiles using the mismatch negativity (MMN) of the auditory event-related potential (ERP).</article-title> <source><italic>Clin. Neurophysiol.</italic></source> <volume>118</volume> <fpage>177</fpage>&#x2013;<lpage>185</lpage>. <pub-id pub-id-type="doi">10.1016/j.clinph.2006.09.001</pub-id> <pub-id pub-id-type="pmid">17070103</pub-id></mixed-citation></ref>
<ref id="B52"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pappalettera</surname> <given-names>C.</given-names></name> <name><surname>Miraglia</surname> <given-names>F.</given-names></name> <name><surname>Cacciotti</surname> <given-names>A.</given-names></name> <name><surname>Nucci</surname> <given-names>L.</given-names></name> <name><surname>Tufo</surname> <given-names>G.</given-names></name> <name><surname>Rossini</surname> <given-names>P. M.</given-names></name><etal/></person-group> (<year>2024</year>). <article-title>The impact of virtual reality and distractors on attentional processes: Insights from EEG.</article-title> <source><italic>Archiv-Euro J. Phys</italic>.</source> <volume>476</volume> <fpage>1727</fpage>&#x2013;<lpage>1742</lpage>. <pub-id pub-id-type="doi">10.1007/s00424-024-03008-w</pub-id> <pub-id pub-id-type="pmid">39158612</pub-id></mixed-citation></ref>
<ref id="B53"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Parmentier</surname> <given-names>F. B. R.</given-names></name></person-group> (<year>2014</year>). <article-title>The cognitive determinants of behavioral distraction by deviant auditory stimuli: A review.</article-title> <source><italic>Psychol. Res.</italic></source> <volume>78</volume> <fpage>321</fpage>&#x2013;<lpage>338</lpage>. <pub-id pub-id-type="doi">10.1007/s00426-013-0534-4</pub-id> <pub-id pub-id-type="pmid">24363092</pub-id></mixed-citation></ref>
<ref id="B54"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Parmentier</surname> <given-names>F. B. R.</given-names></name></person-group> (<year>2016</year>). <article-title>Deviant sounds yield distraction irrespective of the sound&#x2019;s informational value.</article-title> <source><italic>J. Exp. Psychol. Hum. Percept. Perform.</italic></source> <volume>42</volume> <fpage>837</fpage>&#x2013;<lpage>846</lpage>. <pub-id pub-id-type="doi">10.1037/xhp0000181</pub-id> <pub-id pub-id-type="pmid">26594883</pub-id></mixed-citation></ref>
<ref id="B55"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Parmentier</surname> <given-names>F. B. R.</given-names></name> <name><surname>Elford</surname> <given-names>G.</given-names></name> <name><surname>Escera</surname> <given-names>C.</given-names></name> <name><surname>Andr&#x00E9;s</surname> <given-names>P.</given-names></name> <name><surname>Barcel&#x00F3;</surname> <given-names>F.</given-names></name></person-group> (<year>2008</year>). <article-title>The cognitive locus of distraction by acoustic novelty in the cross-modal oddball task.</article-title> <source><italic>Cognition</italic></source> <volume>106</volume> <fpage>408</fpage>&#x2013;<lpage>432</lpage>. <pub-id pub-id-type="doi">10.1016/j.cognition.2007.03.008</pub-id> <pub-id pub-id-type="pmid">17445791</pub-id></mixed-citation></ref>
<ref id="B56"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Perrin</surname> <given-names>F.</given-names></name> <name><surname>Pernier</surname> <given-names>J.</given-names></name> <name><surname>Bertrand</surname> <given-names>O.</given-names></name> <name><surname>Echallier</surname> <given-names>J. F.</given-names></name></person-group> (<year>1989</year>). <article-title>Spherical splines for scalp potential and current density mapping.</article-title> <source><italic>Electroencephalogr. Clin. Neurophysiol.</italic></source> <volume>72</volume> <fpage>184</fpage>&#x2013;<lpage>187</lpage>. <pub-id pub-id-type="doi">10.1016/0013-4694(89)90180-6</pub-id> <pub-id pub-id-type="pmid">2464490</pub-id></mixed-citation></ref>
<ref id="B57"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Polich</surname> <given-names>J.</given-names></name></person-group> (<year>2007</year>). <article-title>Updating P300: An integrative theory of P3a and P3b.</article-title> <source><italic>Clin. Neurophysiol.</italic></source> <volume>118</volume> <fpage>2128</fpage>&#x2013;<lpage>2148</lpage>. <pub-id pub-id-type="doi">10.1016/j.clinph.2007.04.019</pub-id> <pub-id pub-id-type="pmid">17573239</pub-id></mixed-citation></ref>
<ref id="B58"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ritter</surname> <given-names>W.</given-names></name> <name><surname>Sussman</surname> <given-names>E.</given-names></name> <name><surname>Deacon</surname> <given-names>D.</given-names></name> <name><surname>Cowan</surname> <given-names>N.</given-names></name> <name><surname>Vaughan</surname> <given-names>H. G.</given-names></name></person-group> (<year>1999</year>). <article-title>Two cognitive systems simultaneously prepared for opposite events.</article-title> <source><italic>Psychophysiology</italic></source> <volume>36</volume> <fpage>835</fpage>&#x2013;<lpage>838</lpage>.</mixed-citation></ref>
<ref id="B59"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sabri</surname> <given-names>M.</given-names></name> <name><surname>Campbell</surname> <given-names>K. B.</given-names></name></person-group> (<year>2001</year>). <article-title>Effects of sequential and temporal probability of deviant occurrence on mismatch negativity.</article-title> <source><italic>Brain Res. Cogn. Brain Res.</italic></source> <volume>12</volume> <fpage>171</fpage>&#x2013;<lpage>180</lpage>. <pub-id pub-id-type="doi">10.1016/s0926-6410(01)00026-x</pub-id> <pub-id pub-id-type="pmid">11489621</pub-id></mixed-citation></ref>
<ref id="B60"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sams</surname> <given-names>M.</given-names></name> <name><surname>Hari</surname> <given-names>R.</given-names></name> <name><surname>Rif</surname> <given-names>J.</given-names></name> <name><surname>Knuutila</surname> <given-names>J.</given-names></name></person-group> (<year>1993</year>). <article-title>The human auditory sensory memory trace persists about 10 sec: Neuromagnetic evidence.</article-title> <source><italic>J. Cogn. Neurosci.</italic></source> <volume>5</volume> <fpage>363</fpage>&#x2013;<lpage>370</lpage>. <pub-id pub-id-type="doi">10.1162/jocn.1993.5.3.363</pub-id> <pub-id pub-id-type="pmid">23972223</pub-id></mixed-citation></ref>
<ref id="B61"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sams</surname> <given-names>M.</given-names></name> <name><surname>Paavilainen</surname> <given-names>P.</given-names></name> <name><surname>Alho</surname> <given-names>K.</given-names></name> <name><surname>N&#x00E4;&#x00E4;t&#x00E4;nen</surname> <given-names>R.</given-names></name></person-group> (<year>1985</year>). <article-title>Auditory frequency discrimination and event-related potentials.</article-title> <source><italic>Electroencephalogr Clin Neurophysiol.</italic></source> <volume>62</volume> <fpage>437</fpage>&#x2013;<lpage>448</lpage>. <pub-id pub-id-type="doi">10.1016/0168-5597(85)90054-1</pub-id> <pub-id pub-id-type="pmid">2415340</pub-id></mixed-citation></ref>
<ref id="B62"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name></person-group> (<year>2005</year>). <article-title>From presence to consciousness through virtual reality.</article-title> <source><italic>Nat. Rev. Neurosci.</italic></source> <volume>6</volume> <fpage>332</fpage>&#x2013;<lpage>339</lpage>. <pub-id pub-id-type="doi">10.1038/nrn1651</pub-id> <pub-id pub-id-type="pmid">15803164</pub-id></mixed-citation></ref>
<ref id="B63"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>SanMiguel</surname> <given-names>I.</given-names></name> <name><surname>Corral</surname> <given-names>M. J.</given-names></name> <name><surname>Escera</surname> <given-names>C.</given-names></name></person-group> (<year>2008</year>). <article-title>When loading working memory reduces distraction: Behavioral and electrophysiological evidence from an auditory-visual distraction paradigm.</article-title> <source><italic>J. Cong. Neurosci.</italic></source> <volume>20</volume> <fpage>1131</fpage>&#x2013;<lpage>1145</lpage>. <pub-id pub-id-type="doi">10.1162/jocn.2008.20078</pub-id> <pub-id pub-id-type="pmid">18284343</pub-id></mixed-citation></ref>
<ref id="B64"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>SanMiguel</surname> <given-names>I.</given-names></name> <name><surname>Linden</surname> <given-names>D.</given-names></name> <name><surname>Escera</surname> <given-names>C.</given-names></name></person-group> (<year>2010</year>). <article-title>Attention capture by novel sounds: Distraction versus facilitation.</article-title> <source><italic>Eur. J. Cogn. Psychol.</italic></source> <volume>22</volume> <fpage>481</fpage>&#x2013;<lpage>515</lpage>. <pub-id pub-id-type="doi">10.1080/09541440902930994</pub-id></mixed-citation></ref>
<ref id="B65"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sarasso</surname> <given-names>P.</given-names></name> <name><surname>Ronga</surname> <given-names>I.</given-names></name> <name><surname>Piovesan</surname> <given-names>F.</given-names></name> <name><surname>Frascaroli</surname> <given-names>J.</given-names></name> <name><surname>Handjaras</surname> <given-names>G.</given-names></name> <name><surname>Piovesan</surname> <given-names>F.</given-names></name><etal/></person-group> (<year>2024</year>). <article-title>Shared attention in virtual immersive reality enhances electrophysiological correlates of implicit sensory learning.</article-title> <source><italic>Sci. Rep.</italic></source> <volume>14</volume>:<fpage>3767</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-024-53937-w</pub-id> <pub-id pub-id-type="pmid">38355691</pub-id></mixed-citation></ref>
<ref id="B66"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schr&#x00F6;ger</surname> <given-names>E.</given-names></name></person-group> (<year>1996</year>). <article-title>A neural mechanism for involuntary attention shifts to changes in auditory stimulation.</article-title> <source><italic>J Cogn Neurosci.</italic></source> <volume>8</volume> <fpage>527</fpage>&#x2013;<lpage>539</lpage>. <pub-id pub-id-type="doi">10.1162/jocn.1996.8.6.527</pub-id> <pub-id pub-id-type="pmid">23961983</pub-id></mixed-citation></ref>
<ref id="B67"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shaigetz</surname> <given-names>G. V.</given-names></name> <name><surname>Proulx</surname> <given-names>C.</given-names></name> <name><surname>Cabral</surname> <given-names>A.</given-names></name> <name><surname>Choudhury</surname> <given-names>N.</given-names></name> <name><surname>Hewko</surname> <given-names>M.</given-names></name> <name><surname>Kohlenberg</surname> <given-names>E.</given-names></name><etal/></person-group> (<year>2021</year>). <article-title>An immersive and interactive platform for cognitive assessment and rehabilitation (bWell): Design and iterative development process.</article-title> <source><italic>JMIR Rehabil. Assist. Technol.</italic></source> <volume>8</volume>:<fpage>e26629</fpage>. <pub-id pub-id-type="doi">10.2196/26629</pub-id> <pub-id pub-id-type="pmid">34730536</pub-id></mixed-citation></ref>
<ref id="B68"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sussman</surname> <given-names>E. S.</given-names></name></person-group> (<year>2007</year>). <article-title>A new view on the MMN and attention debate: The role of context in processing auditory events.</article-title> <source><italic>J. Psychophysiol.</italic></source> <volume>21</volume> <fpage>164</fpage>&#x2013;<lpage>175</lpage>. <pub-id pub-id-type="doi">10.1027/0269-8803.21.34.164</pub-id></mixed-citation></ref>
<ref id="B69"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sussman</surname> <given-names>E. S.</given-names></name></person-group> (<year>2017</year>). <article-title>Auditory scene analysis: An attention perspective.</article-title> <source><italic>J. Speech Lang. Hear. Res.</italic></source> <volume>60</volume> <fpage>2989</fpage>&#x2013;<lpage>3000</lpage>. <pub-id pub-id-type="doi">10.1044/2017_JSLHR-H-17-0041</pub-id> <pub-id pub-id-type="pmid">29049599</pub-id></mixed-citation></ref>
<ref id="B70"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sussman</surname> <given-names>E.</given-names></name> <name><surname>Winkler</surname> <given-names>I.</given-names></name></person-group> (<year>2001</year>). <article-title>Dynamic sensory updating in the auditory system.</article-title> <source><italic>Brain Res. Cogn. Brain Res.</italic></source> <volume>12</volume> <fpage>431</fpage>&#x2013;<lpage>439</lpage>. <pub-id pub-id-type="doi">10.1016/s0926-6410(01)00067-2</pub-id> <pub-id pub-id-type="pmid">11689303</pub-id></mixed-citation></ref>
<ref id="B71"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tavakoli</surname> <given-names>P.</given-names></name> <name><surname>Campbell</surname> <given-names>K.</given-names></name></person-group> (<year>2016</year>). <article-title>Can an auditory multi-feature optimal paradigm be used for the study of processes associated with attention capture in passive listeners?</article-title> <source><italic>Brain Res.</italic></source> <volume>1648</volume> <fpage>394</fpage>&#x2013;<lpage>408</lpage>. <pub-id pub-id-type="doi">10.1016/j.brainres.2016.08.003</pub-id> <pub-id pub-id-type="pmid">27495985</pub-id></mixed-citation></ref>
<ref id="B72"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tavakoli</surname> <given-names>P.</given-names></name> <name><surname>Dale</surname> <given-names>A.</given-names></name> <name><surname>Boafo</surname> <given-names>A.</given-names></name> <name><surname>Campbell</surname> <given-names>K.</given-names></name></person-group> (<year>2019</year>). <article-title>Evidence of P3a during sleep, a process associated with intrusions into consciousness in the waking state.</article-title> <source><italic>Front. Neurosci.</italic></source> <volume>12</volume>:<fpage>1028</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2018.01028</pub-id> <pub-id pub-id-type="pmid">30686989</pub-id></mixed-citation></ref>
<ref id="B73"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Teh</surname> <given-names>J. J.</given-names></name> <name><surname>Pascoe</surname> <given-names>D. J.</given-names></name> <name><surname>Hafeji</surname> <given-names>S.</given-names></name> <name><surname>Parchure</surname> <given-names>R.</given-names></name> <name><surname>Koczoski</surname> <given-names>A.</given-names></name> <name><surname>Rimmer</surname> <given-names>M. P.</given-names></name></person-group> (<year>2024</year>). <article-title>Efficacy of virtual reality for pain relief in medical procedures: A systematic review and meta-analysis.</article-title> <source><italic>BMC Med.</italic></source> <volume>22</volume>:<fpage>64</fpage>. <pub-id pub-id-type="doi">10.1186/s12916-024-03266-6</pub-id> <pub-id pub-id-type="pmid">38355563</pub-id></mixed-citation></ref>
<ref id="B74"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Terkildsen</surname> <given-names>T.</given-names></name> <name><surname>Makransky</surname> <given-names>G.</given-names></name></person-group> (<year>2019</year>). <article-title>Measuring presence in virtual reality: An overview of current methods.</article-title> <source><italic>Behav. Res. Methods</italic></source> <volume>51</volume> <fpage>1945</fpage>&#x2013;<lpage>1962</lpage>. <pub-id pub-id-type="doi">10.3758/s13428-019-01281-9</pub-id></mixed-citation></ref>
<ref id="B75"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tiitinen</surname> <given-names>H.</given-names></name> <name><surname>May</surname> <given-names>P.</given-names></name> <name><surname>Reinikainen</surname> <given-names>K.</given-names></name> <name><surname>N&#x00E4;&#x00E4;t&#x00E4;nen</surname> <given-names>R.</given-names></name></person-group> (<year>1994</year>). <article-title>Attentive novelty detection in humans is governed by pre-attentive sensory memory.</article-title> <source><italic>Nature</italic></source> <volume>372</volume> <fpage>90</fpage>&#x2013;<lpage>92</lpage>. <pub-id pub-id-type="doi">10.1038/372090a0</pub-id> <pub-id pub-id-type="pmid">7969425</pub-id></mixed-citation></ref>
<ref id="B76"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Viderman</surname> <given-names>D.</given-names></name> <name><surname>Tapinova</surname> <given-names>K.</given-names></name> <name><surname>Aubakirova</surname> <given-names>M.</given-names></name> <name><surname>Abdildin</surname> <given-names>Y. G.</given-names></name></person-group> (<year>2023</year>). <article-title>The prevalence of pain in chronic diseases: An umbrella review of systematic reviews.</article-title> <source><italic>J. Clin. Med.</italic></source> <volume>12</volume>:<fpage>7302</fpage>. <pub-id pub-id-type="doi">10.3390/jcm12237302</pub-id> <pub-id pub-id-type="pmid">38068354</pub-id></mixed-citation></ref>
<ref id="B77"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Volosin</surname> <given-names>M.</given-names></name> <name><surname>Horv&#x00E1;th</surname> <given-names>J.</given-names></name></person-group> (<year>2020</year>). <article-title>Task difficulty modulates distraction effects: An ERP study.</article-title> <source><italic>Neuropsychologia</italic></source> <volume>142</volume>:<fpage>107447</fpage>. <pub-id pub-id-type="doi">10.1016/j.neuropsychologia.2020.107447</pub-id> <pub-id pub-id-type="pmid">32243885</pub-id></mixed-citation></ref>
<ref id="B78"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wetzel</surname> <given-names>N.</given-names></name> <name><surname>Schr&#x00F6;ger</surname> <given-names>E.</given-names></name> <name><surname>Widmann</surname> <given-names>A.</given-names></name></person-group> (<year>2013</year>). <article-title>The dissociation between the P3a event-related potential and behavioral distraction.</article-title> <source><italic>Psychophysiology</italic></source> <volume>50</volume> <fpage>920</fpage>&#x2013;<lpage>930</lpage>. <pub-id pub-id-type="doi">10.1111/psyp.12072</pub-id> <pub-id pub-id-type="pmid">23763292</pub-id></mixed-citation></ref>
<ref id="B79"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Winkler</surname> <given-names>I.</given-names></name></person-group> (<year>2007</year>). <article-title>Interpreting the mismatch negativity.</article-title> <source><italic>J. Psychophysiol.</italic></source> <volume>21</volume> <fpage>147</fpage>&#x2013;<lpage>163</lpage>. <pub-id pub-id-type="doi">10.1027/0269-8803.21.34.147</pub-id></mixed-citation></ref>
<ref id="B80"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Winkler</surname> <given-names>I.</given-names></name> <name><surname>Denham</surname> <given-names>S. L.</given-names></name> <name><surname>Nelken</surname> <given-names>I.</given-names></name></person-group> (<year>2009</year>). <article-title>Modeling the auditory scene: Predictive regularity representations and perceptual objects.</article-title> <source><italic>Trends Cogn. Sci.</italic></source> <volume>13</volume> <fpage>532</fpage>&#x2013;<lpage>540</lpage>. <pub-id pub-id-type="doi">10.1016/j.tics.2009.09.003</pub-id> <pub-id pub-id-type="pmid">19828357</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by"><p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/55251/overview">Achim Klug</ext-link>, University of Colorado Anschutz Medical Campus, United States</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by"><p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/670384/overview">Panagiotis Kourtesis</ext-link>, American College of Greece, Greece</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2948968/overview">Benjamin Stodt</ext-link>, Leibniz Research Centre for Working Environment and Human Factors (IfADo), Germany</p></fn>
</fn-group>
</back>
</article>