<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Virtual Real.</journal-id>
<journal-title>Frontiers in Virtual Reality</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Virtual Real.</abbrev-journal-title>
<issn pub-type="epub">2673-4192</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1623584</article-id>
<article-id pub-id-type="doi">10.3389/frvir.2025.1623584</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Virtual Reality</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Effects of stress on perceptual learning in a virtual reality environment</article-title>
<alt-title alt-title-type="left-running-head">Cass et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/frvir.2025.1623584">10.3389/frvir.2025.1623584</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Cass</surname>
<given-names>John</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/726311/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Fu</surname>
<given-names>Wing Hong</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3192939/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Li</surname>
<given-names>Yanping</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3069520/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Cahill</surname>
<given-names>Larissa</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Weidemann</surname>
<given-names>Gabrielle</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/337821/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>School of Psychology, Western Sydney University</institution>, <addr-line>Sydney</addr-line>, <addr-line>NSW</addr-line>, <country>Australia</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>MARCS Institute for Brain, Behaviour and Development, Western Sydney University</institution>, <addr-line>Sydney</addr-line>, <addr-line>NSW</addr-line>, <country>Australia</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>Department of Defence, Defence Science and Technology Group</institution>, <addr-line>Melbourne</addr-line>, <addr-line>VIC</addr-line>, <country>Australia</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2893445/overview">Brian Klebig</ext-link>, Bethany Lutheran College, United States</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/831834/overview">Hui Chen</ext-link>, Chinese Academy of Sciences (CAS), China</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2202510/overview">Ying Yang</ext-link>, Monash University, Australia</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: John Cass, <email>j.cass@westernsydney.edu.au</email>
</corresp>
</author-notes>
<pub-date pub-type="epub">
<day>23</day>
<month>09</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>6</volume>
<elocation-id>1623584</elocation-id>
<history>
<date date-type="received">
<day>06</day>
<month>05</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>26</day>
<month>08</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2025 Cass, Fu, Li, Cahill and Weidemann.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Cass, Fu, Li, Cahill and Weidemann</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>This study investigates two key questions in perceptual learning using a ten-day visual training protocol embedded in a first-person shooter (FPS) task within virtual reality (VR): (1) whether training improves the visual system&#x2019;s ability to integrate orientation information across the visual field, and (2) whether aversive electrodermal stimulation and associated stress levels influence perceptual performance and/or learning.</p>
</sec>
<sec>
<title>Method</title>
<p>Seventeen participants completed an orientation-averaging task involving Gabor arrays of varying set-sizes (1, 2, 3, 4, or 8 elements) under three shock conditions: no shock, performance-contingent shock, and random shock.</p>
</sec>
<sec>
<title>Results</title>
<p>Training led to improvements in both accuracy and response times, while increasing set-size monotonically impaired performance. The interaction between training and set-size was weak, suggesting that training benefits likely emerge at a post-integration or decisional stage. Stress responses, indexed by the State-Trait Anxiety Inventory (STAI), confirmed elevated anxiety in both shock conditions compared to control. However, this increase in state anxiety did not reliably affect task performance or learning outcomes, nor did it modulate set-size effects. Notably, participants&#x2019; accuracy degraded when an on-screen health bar depicted lower &#x201C;health&#x201d; levels, regardless of actual shock delivery or contingency.</p>
</sec>
<sec>
<title>Discussion</title>
<p>These findings indicate that visual feedback cues can shape engagement or motivation independently of experienced stress. More broadly, the results underscore the role of motivational and attentional mechanisms in immersive training environments and provide a framework for evaluating stress effects using subjective anxiety and objective psychophysical measures.</p>
</sec>
</abstract>
<kwd-group>
<kwd>visual perception</kwd>
<kwd>stress</kwd>
<kwd>ensemble processing</kwd>
<kwd>visual capacity</kwd>
<kwd>psychophysics</kwd>
<kwd>orientation processing</kwd>
<kwd>first-person shooter (FPS)</kwd>
</kwd-group>
<contract-sponsor id="cn001">Defence Science and Technology Group<named-content content-type="fundref-id">10.13039/501100008812</named-content>
</contract-sponsor>
<counts>
<page-count count="13"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Virtual Reality and Human Behaviour</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<p>Modern visual displays&#x2014;including high-resolution monitors and head-mounted systems&#x2014;can present complex, dynamic environments with exceptional spatial and temporal precision (<xref ref-type="bibr" rid="B56">Zhao et al., 2022</xref>). However, despite this technological fidelity, the human visual system is constrained by sensory, attentional, and contextual factors that limit perceptual access to all available input (<xref ref-type="bibr" rid="B20">Gilbert et al., 2000</xref>; <xref ref-type="bibr" rid="B8">Carrasco and Frieder, 1997</xref>; <xref ref-type="bibr" rid="B23">He et al., 1996</xref>; <xref ref-type="bibr" rid="B9">Carrasco et al., 1995</xref>).</p>
<p>Among these modulators, experience plays a critical role. In the short term, perceptual systems exhibit adaptation, whereby recent exposure to stimuli affects the processing of subsequent inputs (<xref ref-type="bibr" rid="B24">Heinemann and Marill, 1954</xref>; <xref ref-type="bibr" rid="B5">Blakemore and Nachmias, 1971</xref>; <xref ref-type="bibr" rid="B42">Rideaux et al., 2023</xref>). Adaptation effects are typically transient, decaying over seconds to minutes depending on the stimulus and task (<xref ref-type="bibr" rid="B55">Zhang et al., 2023</xref>; <xref ref-type="bibr" rid="B27">Jones and Holding, 1975</xref>). These passive effects contrast with longer-lasting improvements achieved through active engagement&#x2014;namely, perceptual learning. Perceptual learning refers to sustained enhancements in stimulus discrimination or detection that result from practice and task-specific exposure (<xref ref-type="bibr" rid="B22">Goldstone, 1998</xref>; <xref ref-type="bibr" rid="B39">Prettyman, 2019</xref>). Such learning influences both low-level sensory processing (<xref ref-type="bibr" rid="B37">Polat and Sagi, 1994</xref>; <xref ref-type="bibr" rid="B53">Yehezkel et al., 2015</xref>) and higher-order attentional deployment across the visual field (<xref ref-type="bibr" rid="B14">Donovan and Carrasco, 2018</xref>).</p>
<sec id="s1-1">
<title>1.1 Related work</title>
<sec id="s1-1-1">
<title>1.1.1 Ensemble perception and sampling efficiency</title>
<p>One class of perceptual task in which perceptual learning has a demonstrable impact is ensemble perception, where observers estimate global statistical properties&#x2014;such as the average, variance, or range&#x2014;across a set of spatially distributed elements (<xref ref-type="bibr" rid="B52">Whitney and Yamanashi Leib, 2018</xref>; <xref ref-type="bibr" rid="B36">Moerel et al., 2016</xref>). Orientation averaging tasks using Gabor patches&#x2014;sinusoidal gratings commonly used in vision science to assess orientation sensitivity&#x2014;are often used to study this process. Using an orientation averaging task, <xref ref-type="bibr" rid="B36">Moerel et al. (2016)</xref> demonstrated that accuracy improves with practice, which they attributed to increased <italic>sampling efficiency</italic>: the effective number of local elements integrated by the visual system to estimate the global average.</p>
</sec>
<sec id="s1-1-2">
<title>1.1.2 Attention and computational stages</title>
<p>
<xref ref-type="bibr" rid="B11">Dakin et al. (2009)</xref> further linked sampling efficiency to attentional resources by employing a dual-task paradigm. Participants simultaneously performed a central task and an orientation-averaging task in the periphery. When central attentional load increased, orientation-averaging performance declined. This led to two theoretical accounts. The early selection account posits that attention limits how many local signals reach the integration stage. Alternatively, the late selection account suggests that integration occurs across all inputs, but attention modulates signal-to-noise at a later, decisional stage.</p>
</sec>
<sec id="s1-1-3">
<title>1.1.3 Aversive conditioning and perception</title>
<p>Affective and motivational factors also influence perception. Aversive conditioning&#x2014;where a neutral stimulus is paired with an unpleasant outcome&#x2014;has been shown to enhance discrimination by increasing the distinctiveness of stimulus features (<xref ref-type="bibr" rid="B41">Rhodes et al., 2018</xref>; <xref ref-type="bibr" rid="B32">Li et al., 2008</xref>; <xref ref-type="bibr" rid="B30">Lawrence, 1949</xref>; <xref ref-type="bibr" rid="B48">Stegmann et al., 2021</xref>). However, the literature is mixed: other studies have reported decrements in perceptual performance following exposure to aversive stimuli (<xref ref-type="bibr" rid="B40">Resnik et al., 2011</xref>; <xref ref-type="bibr" rid="B46">Shalev et al., 2018</xref>).</p>
</sec>
<sec id="s1-1-4">
<title>1.1.4 Stress and attention</title>
<p>One proposed mechanism for these opposing effects is stress. Exposure to unpredictable or intense aversive stimuli triggers a stress response involving elevated cortisol and sympathetic nervous system activation (<xref ref-type="bibr" rid="B26">Herman et al., 2016</xref>; <xref ref-type="bibr" rid="B2">Arnsten, 2009</xref>). This cascade can impair frontally mediated attentional functions, particularly in high-demand tasks (<xref ref-type="bibr" rid="B16">Ebersole, 2016</xref>; <xref ref-type="bibr" rid="B43">Sanger et al., 2014</xref>; <xref ref-type="bibr" rid="B12">de Voogd et al., 2022</xref>; <xref ref-type="bibr" rid="B13">Dinse et al., 2017</xref>). As orientation averaging relies on attention, it is plausible that stress may reduce sampling efficiency, either by limiting the number of integrated samples (early selection) or by degrading decision-level processing (late selection).</p>
</sec>
<sec id="s1-1-5">
<title>1.1.5 Motivation and aversive reinforcement</title>
<p>Conversely, performance-contingent punishment&#x2014;such as shocks administered after incorrect responses&#x2014;may enhance performance through increased task motivation. This process, grounded in operant conditioning, has been shown to improve perceptual accuracy when feedback is contingent and behaviourally relevant (<xref ref-type="bibr" rid="B51">Watson et al., 2019</xref>; <xref ref-type="bibr" rid="B44">Sawaki et al., 2015</xref>; <xref ref-type="bibr" rid="B18">Erickson, 1970</xref>; <xref ref-type="bibr" rid="B6">Blank et al., 2013</xref>). Such effects have been attributed to motivational sharpening of attention and response caution.</p>
</sec>
</sec>
<sec id="s1-2">
<title>1.2 Research aims and hypotheses</title>
<p>The present study explores how training and affective context (via aversive electrodermal stimulation) influence orientation-averaging performance in an immersive virtual reality (VR) environment. Specifically, we investigate:<list list-type="simple">
<list-item>
<p>1. Whether perceptual training improves ensemble processing via early-stage sampling or late-stage decision efficiency.</p>
</list-item>
<list-item>
<p>2. How acute stress (induced by random shocks) and motivation (induced by performance-contingent shocks) modulate perceptual performance.</p>
</list-item>
<list-item>
<p>3. Whether these effects interact with the quantity of task-relevant information (set-size: 1, 2, 3, 4, or 8 Gabors).</p>
</list-item>
</list>
</p>
<p>Our hypotheses are as follows:<list list-type="simple">
<list-item>
<p>&#x2022; H1: If training improves early-stage sampling, then increasing set-size will result in shallower performance declines over time (i.e., improved slope). If training improves late-stage decision processes, accuracy will increase overall, but the slope of set-size degradation will remain constant.</p>
</list-item>
<list-item>
<p>&#x2022; H2: If acute stress impairs attentional capacity, we expect lower performance in the random shock group compared to the no shock and performance-contingent shock groups.</p>
</list-item>
<list-item>
<p>&#x2022; H3: If motivational contingencies improve performance, accuracy in the performance-contingent shock group will exceed that in the random shock group.</p>
</list-item>
<list-item>
<p>&#x2022; H4: If stress impacts early encoding, accuracy will decline more steeply with set-size in the shock conditions. If stress or motivation affects late decision stages, we expect group differences in accuracy without significant changes in slope.</p>
</list-item>
</list>
</p>
</sec>
<sec id="s1-3">
<title>1.3 Experimental overview</title>
<p>We employed a 10-day longitudinal design, in which 17 participants completed a VR-based shooting task involving orientation averaging of Gabor stimuli. Gabor arrays were positioned in fixed circular configurations to ensure consistent visual sampling across trials. This design allowed for controlled manipulation of set-size while minimising confounds due to spatial unpredictability, which could affect attention allocation.</p>
<p>Participants were randomly assigned to one of three groups: no shock, performance-contingent shock, or random shock, and we measured accuracy, response time, and anxiety levels using validated instruments (e.g., STAI). While the 10-day period aligns with prior perceptual learning studies indicating reliable gains over such durations (<xref ref-type="bibr" rid="B36">Moerel et al., 2016</xref>), future work may examine shorter or longer timelines.</p>
<p>This study seeks to clarify the mechanisms by which perceptual learning unfolds and the influence of stress and motivation on visual decision-making in immersive environments&#x2014;critical insights for adaptive training and human performance optimisation in VR.</p>
</sec>
</sec>
<sec sec-type="methods" id="s2">
<title>2 Methods</title>
<sec id="s2-1">
<title>2.1 Participants</title>
<p>Twenty-two participants were recruited from the Western Sydney University community via convenience sampling between 1st April and 31st July 2022. Two participants withdrew from the study&#x2014;one due to illness (COVID-19) and one voluntarily&#x2014;leaving 20 who completed all testing sessions. Three additional participants were excluded: one for non-compliance with task instructions, and two for receiving an unusually high number of shocks due to technical error. The final sample comprised 17 healthy adults (8 female; 9 male; mean age &#x3d; 26.2&#xa0;years, SD &#x3d; 12.4), all with normal or corrected-to-normal vision.</p>
<p>All participants provided informed consent prior to participation. The study was approved by the Western Sydney University Human Research Ethics Committee (H13736) and conducted in accordance with the Declaration of Helsinki. Participants were na&#xef;ve to the study&#x2019;s aims and received $250 AUD for their participation.</p>
</sec>
<sec id="s2-2">
<title>2.2 Experimental design</title>
<p>The experiment used a 3 (Shock Group: No Shock, Random Shock, Performance-Contingent Shock) &#xd7; 10 (Training Day) &#xd7; 5 (Set-Size: 1, 2, 3, 4, or 8 Gabor elements) mixed factorial design. Shock group was a between-subjects factor; training day and set-size were within-subjects factors. Each participant completed 10 training sessions on consecutive weekdays.</p>
<p>Participants were randomly allocated to one of three shock conditions. In the performance-contingent shock group, participants received an electrodermal stimulus following every 10th incorrect response. In the random shock group, shocks were administered with a 3% chance per trial, independent of performance. This probability was determined via pilot testing to approximate the average frequency of shocks received by the performance-contingent group. Participants in the no-shock group received no stimulation.</p>
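<p>The two shock schedules reduce to a simple per-trial decision rule. A minimal Python sketch is given below; the function and constant names are illustrative and not taken from the experiment code.</p>
<preformat>
import random

RANDOM_SHOCK_P = 0.03    # random group: per-trial shock probability
ERRORS_PER_SHOCK = 10    # performance-contingent group: shock on every 10th error

def should_shock(group, correct, error_count):
    """Return (shock, updated_error_count) for a single trial."""
    if group == "performance_contingent":
        if not correct:
            error_count += 1
            if error_count % ERRORS_PER_SHOCK == 0:
                return True, error_count
        return False, error_count
    if group == "random":
        # Independent of performance: shocks on ~3% of trials
        return random.random() &lt; RANDOM_SHOCK_P, error_count
    return False, error_count    # no-shock group
</preformat>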
<p>Average daily shocks were 10.9 (SD &#x3d; 2.3) in the performance-contingent group and 13.1 (SD &#x3d; 3.0) in the random shock group.</p>
<p>The key outcome measures were orientation discrimination accuracy, correct response time (RT), and state anxiety scores (via the STAI).</p>
</sec>
<sec id="s2-3">
<title>2.3 Task overview</title>
<p>Participants performed a custom-built virtual reality orientation-averaging task embedded in a first-person shooter (FPS) game. On each trial, they viewed an array of Gabor elements (stimulus set-size varied across trials) and judged whether the average orientation was tilted clockwise or counter-clockwise from vertical (<xref ref-type="fig" rid="F1">Figure 1</xref>). Participants responded by shooting one of two visually identical virtual agents, standing to the left and right of a central pole, depending on the inferred average orientation of the Gabors.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>First-person perspective of the VR D-world environment and Gabor arrays. Gabor stimuli of varying set-sizes (1, 2, 3, 4 and 8) illustrated in panels <bold>(a&#x2013;e)</bold> respectively. Note on each trial the average orientation was plus or minus 5&#xb0; from vertical. Images all show Gabors tilted &#x2212;5&#xb0; from vertical on average, signifying that the entity on the left is the enemy.</p>
</caption>
<graphic xlink:href="frvir-06-1623584-g001.tif">
<alt-text content-type="machine-generated">Five images labeled (a) to (e) show a soldier in a field with mountains in the background. Each image has a different number of white streaks representing set sizes from 1 to 8. Streaks increase from one in image (a) to eight in image (e). A translucent gray overlay highlights the area with streaks in each image.</alt-text>
</graphic>
</fig>
<p>Correct responses prevented hostile fire, while incorrect responses triggered either a visual consequence (loss of health bar) or, in shock conditions, electrodermal stimulation (depending on shock group). The task structure was designed to promote integration of ensemble visual information and translate perceptual decisions into motor actions in an immersive environment.</p>
</sec>
<sec id="s2-4">
<title>2.4 Materials and apparatus</title>
<sec id="s2-4-1">
<title>2.4.1 Software</title>
<p>The virtual environment was developed by MultiSim<sup>&#xa9;</sup> using the Unity engine and presented via a custom-built &#x2018;D-world&#x2019; module. The D-world simulated a rural Swiss landscape (coordinates: 46.727&#xb0; N, 12.219&#xb0; E) and included all in-game events and stimuli. System events and behavioural data were recorded in H5 format using a built-in logger with a maximum sampling rate of 500&#xa0;Hz. Only state changes triggered new entries, minimising data file size.</p>
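<p>The state-change logging scheme can be illustrated as follows: a sample is appended only when the monitored value differs from the last recorded entry, so file size grows with the number of events rather than with the sampling rate. This is a speculative reconstruction using h5py; the built-in logger itself was not released.</p>
<preformat>
import h5py

class StateChangeLogger:
    """Append (time, value) pairs only when the value changes."""

    def __init__(self, path, channel):
        self.f = h5py.File(path, "w")
        self.t = self.f.create_dataset(channel + "/t", shape=(0,),
                                       maxshape=(None,), dtype="f8")
        self.v = self.f.create_dataset(channel + "/v", shape=(0,),
                                       maxshape=(None,), dtype="f8")
        self.last = None

    def log(self, t, value):
        if value == self.last:       # unchanged state: skip entry
            return
        n = self.t.shape[0]
        self.t.resize((n + 1,)); self.t[n] = t
        self.v.resize((n + 1,)); self.v[n] = value
        self.last = value
</preformat>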
<p>A separate Python script (v3.8.6) synchronized experimental events, controlled shock delivery, and parsed task performance.</p>
</sec>
<sec id="s2-4-2">
<title>2.4.2 Hardware</title>
<p>The experiment was run on two Windows 10&#xa0;PCs: one for rendering the D-world (Intel Core i7-9700K, NVIDIA RTX 2060), and another for VR stimulus presentation and headset output (Intel Core i7-9700K, NVIDIA RTX 2070 SUPER). Audio feedback was delivered via Philips/Gibson headphones.</p>
<p>Participants viewed stimuli using an Oculus Rift S headset (1,280 &#xd7; 1,440 pixels per eye; 80&#xa0;Hz refresh rate). Interpupillary distances (IPD) were measured using the &#x2018;GlassesOn&#x2019; mobile app (<xref ref-type="bibr" rid="B33">LTD, 2022</xref>) and matched to the headset&#x2019;s software-adjustable settings (range: 58&#x2013;69&#xa0;mm).</p>
<p>A custom-built replica rifle prop (988&#xa0;g) (<xref ref-type="fig" rid="F2">Figure 2a</xref>) housed a left-handed Oculus Touch controller to track six degrees of freedom. The prop was ergonomically adapted and visually matched to an EF88 AUS Steyr assault rifle.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Experimental apparatus and electrodermal stimulus set-up. <bold>(a)</bold> Shows a participant wearing the VR Oculus headset and headphones holding the controller-mounted custom rifle prop with electrodes applied to the non-dominant forearm. <bold>(b)</bold> Shows the Biopac MP-150 signal conditioning module and STMISOC electrodermal stimulation device.</p>
</caption>
<graphic xlink:href="frvir-06-1623584-g002.tif">
<alt-text content-type="machine-generated">(a) A person wearing a virtual reality headset and headphones holds a device with a long handle and orange end, resembling a controller. (b) A setup of BIOMAC Systems, Inc. equipment, including the MP150 module and various connected devices with multiple ports and labels.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s2-4-3">
<title>2.4.3 Electrodermal stimulation</title>
<p>Electrodermal stimuli were delivered using a Biopac MP-150 with an STMISOC isolated stimulator (<xref ref-type="fig" rid="F2">Figure 2b</xref>). Participants in shock conditions self-adjusted the stimulation intensity prior to each session, targeting a level they reported as &#x201c;uncomfortable but not painful.&#x201d; Stimulation pulses were 100&#xa0;ms in duration, with selected currents ranging from 30 to 90&#xa0;&#xb5;A (mean &#x3d; 62.7&#xa0;&#xb5;A, SD &#x3d; 14.3).</p>
</sec>
<sec id="s2-4-4">
<title>2.4.4 Subjective measures</title>
<p>State and trait anxiety were measured using the State-Trait Anxiety Inventory for Adults (STAI-AD) (<xref ref-type="bibr" rid="B47">Spielberger et al., 1983</xref>), administered pre- and post-session via Qualtrics on a tablet. The STAI-AD includes 40 items rated on a 4-point Likert scale. It has high reliability (&#x3b1; &#x3d; 0.86&#x2013;0.95) and strong construct validity.</p>
</sec>
</sec>
<sec id="s2-5">
<title>2.5 Stimuli and virtual environment</title>
<p>The virtual environment included a central pole flanked by two stationary soldier agents (98 &#xd7; 256 pixels), one of whom was designated &#x201c;hostile&#x201d; each trial. Hostility was randomly assigned on each trial and signaled solely via the mean orientation of the Gabor array: &#x2212;5&#xb0; indicated the left agent was hostile; &#x2b;5&#xb0; indicated the right.</p>
<p>Stimuli appeared in a semi-transparent 1,024 &#xd7; 1,024 pixel square, fixed to the participant&#x2019;s visual field. The health bar in the lower-right corner reduced by 10% after incorrect responses for all participants, acting as performance feedback and shock countdown (in the performance-contingent group). Following any trial in which the health bar reached 0%, it was reset to 100% at the start of the subsequent trial.</p>
<sec id="s2-5-1">
<title>2.5.1 Gabor arrays</title>
<p>Each trial presented 1, 2, 3, 4, or 8 Gabor patches arranged equidistantly around a circular annulus (radius: 400 pixels; &#x223c;25&#xb0; visual angle), with the array&#x2019;s overall angular position randomised across trials. Each Gabor was a sinusoidal grating (0.8&#x2013;1.6 cycles/deg) with a Gaussian envelope (FWHM &#x3d; 83 pixels). For multi-element trials, orientations were sampled from a Gaussian distribution (SD &#x3d; 20&#xb0;) with a mean of &#xb1;5&#xb0; from vertical, and the final Gabor&#x2019;s orientation was adjusted to ensure the overall mean orientation met the &#xb1;5&#xb0; offset exactly. <xref ref-type="fig" rid="F1">Figure 1</xref> illustrates all five set-size conditions.</p>
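<p>The deterministic mean-fixing procedure can be summarised in a short Python sketch, assuming orientations are expressed in degrees from vertical (function names are illustrative):</p>
<preformat>
import numpy as np

def gabor_orientations(n, target=5.0, sd=20.0, rng=None):
    """Sample n orientations (deg from vertical) whose mean is exactly target.

    All but one element are drawn from N(target, sd); the final element
    is set so that the array mean equals the target offset exactly.
    """
    rng = rng or np.random.default_rng()
    if n == 1:
        return np.array([target])
    samples = rng.normal(target, sd, size=n - 1)
    last = n * target - samples.sum()    # forces mean == target
    return np.append(samples, last)

def gabor_positions(n, radius_px=400, rng=None):
    """Equidistant positions on the annulus, randomly rotated each trial."""
    rng = rng or np.random.default_rng()
    offset = rng.uniform(0, 2 * np.pi)
    angles = offset + 2 * np.pi * np.arange(n) / n
    return radius_px * np.column_stack([np.cos(angles), np.sin(angles)])
</preformat>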
</sec>
<sec id="s2-5-2">
<title>2.5.2 Trial timing and response</title>
<p>Each trial began with a 500 ms system initialization delay, followed by display of the visual field square. After 500 ms, the Gabor array appeared and remained visible for up to 2,500 ms or until the participant shot a target. Reaction time was calculated from Gabor onset to response. If no response was made within 2,500 ms, an animation showed the hostile agent firing at the participant.</p>
<p>Incorrect trials (shooting the friendly agent or failing to shoot the hostile one) triggered a 10% reduction in the health bar. In the performance-contingent group, a shock was administered when the health bar reached 0%. In the random group, shocks were probabilistic and unrelated to health status.</p>
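<p>A schematic sketch of the trial consequence logic is given below, assuming health is tracked as a proportion (0&#x2013;1) and that the response handler is supplied by the task engine; the actual task ran in Unity, so this Python fragment is purely illustrative.</p>
<preformat>
INIT_MS, PRE_MS, MAX_RESPONSE_MS = 500, 500, 2500
HEALTH_STEP = 0.10     # health lost per incorrect trial

def run_trial(health, respond):
    """respond() is assumed to return ("hostile" or "friendly", rt_ms),
    or None on timeout; RT is measured from Gabor onset, which occurs
    INIT_MS + PRE_MS after the trial starts."""
    outcome = respond(timeout_ms=MAX_RESPONSE_MS)
    correct = outcome is not None and outcome[0] == "hostile"
    if not correct:
        health = max(0.0, health - HEALTH_STEP)
    shock_due = health == 0.0    # acted on in the performance-contingent group
    if shock_due:
        health = 1.0             # bar resets at the start of the next trial
    return correct, health, shock_due
</preformat>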
</sec>
</sec>
<sec id="s2-6">
<title>2.6 Procedure</title>
<p>All participants completed 10 testing sessions on consecutive weekdays. On Day 1, participants were briefed, consented, and randomly assigned to a shock group (No Shock: n &#x3d; 5; Random Shock: n &#x3d; 5; Performance-Contingent: n &#x3d; 7). After measuring IPD and fitting the headset, participants completed a practice session (&#x223c;20 trials) to ensure task understanding.</p>
<p>Shock group participants underwent a daily calibration to identify their preferred stimulation level. Each testing session consisted of two blocks of 210 trials (42 per set-size), separated by a 5&#x2013;10&#xa0;min break. Participants completed the STAI before and after each session.</p>
</sec>
<sec id="s2-7">
<title>2.7 Ethical considerations and data availability</title>
<p>The experiment was approved by the WSU Human Research Ethics Committee (H13736). All procedures complied with the Declaration of Helsinki. This study was not preregistered. De-identified data and analysis scripts are available upon request.</p>
</sec>
<sec id="s2-8">
<title>2.8 Competing interests statement</title>
<p>The authors declare no competing interests.</p>
</sec>
</sec>
<sec sec-type="results" id="s3">
<title>3 Results</title>
<p>In this study we assessed two measures of behavioural performance: accuracy and correct response times. Accuracy was indexed as the proportion of trials in which the participant correctly identified and shot the hostile agent before being shot by it. Trials in which the participant shot the friendly agent or was shot by the hostile agent were classed as incorrect. Response time refers to the interval between the appearance of the diagnostic Gabor elements and the participant shooting the hostile agent; only correct trials were used in the calculation of response times.</p>
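<p>Given a trial-level log, both measures reduce to simple aggregations. A sketch assuming a pandas data frame with hypothetical column names:</p>
<preformat>
import pandas as pd

def summarise(trials: pd.DataFrame) -> pd.DataFrame:
    """Assumed columns: participant, day, set_size, correct (bool),
    rt_ms (Gabor onset to shot; NaN when no shot was fired)."""
    keys = ["participant", "day", "set_size"]
    acc = trials.groupby(keys)["correct"].mean().rename("accuracy")
    # Response times are computed over correct trials only
    rt = (trials[trials["correct"]]
          .groupby(keys)["rt_ms"].mean().rename("rt_correct_ms"))
    return pd.concat([acc, rt], axis=1).reset_index()
</preformat>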
<p>Performance analyses were conducted using linear mixed-effects models and growth curve analysis, in accordance with established methods for longitudinal performance modeling.</p>
<p>The overall effects of set-size and training session on accuracy and correct response times in the orientation-averaging/shooting task, averaged across participants in each of the three shock conditions, are shown in <xref ref-type="fig" rid="F3">Figure 3</xref>. Visual inspection of this figure reveals several trends. Most notable are the effects of training session, with performance generally improving across successive days of testing: accuracy increased (top row, <xref ref-type="fig" rid="F3">Figures 3a-e</xref>) and response times decreased (bottom row, <xref ref-type="fig" rid="F3">Figures 3f-j</xref>). With regard to set-size (columns of <xref ref-type="fig" rid="F3">Figure 3</xref>), accuracy decreased and response times increased with increasing set-size. The effects of the shock conditions (coloured curves) are less obvious from visual inspection.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Accuracy and correct response times. Effects of training session and set-size on participant-averaged accuracy <bold>(a-e)</bold> and response times <bold>(f-j)</bold> in each of the three shock condition training groups: no shock (blue), random shock (red) and performance-contingent shock (yellow). Shaded regions are between-subjects standard errors.</p>
</caption>
<graphic xlink:href="frvir-06-1623584-g003.tif">
<alt-text content-type="machine-generated">Line graphs depict percentage correctness and response times across ten training sessions for different set sizes and conditions. Top row shows percentage correct increasing; bottom row shows response times decreasing. Conditions include No shock (blue), Random (red), and Performance-contingent (yellow).</alt-text>
</graphic>
</fig>
<p>To statistically assess the main and interactive effects of training and shock condition we applied growth curve analysis (<xref ref-type="bibr" rid="B35">Mirman, 2014</xref>), a technique for modelling performance change in longitudinal studies. Models were fitted using the <italic>lmer</italic> function from the package <italic>lmerTest</italic> (<xref ref-type="bibr" rid="B29">Kuznetsova et al., 2017</xref>) in <italic>R</italic> version 4.2.1 (<xref ref-type="bibr" rid="B49">R Core Team, 2022</xref>). Given that several of the curves in <xref ref-type="fig" rid="F4">Figure 4</xref> contained at least one inflection, growth curve data were modelled with up to second-order orthogonal polynomials, assessed by three terms in the fitted model: the intercept describes the mean level, the linear term captures the slope of the curve over time, and the quadratic term captures curvature, i.e., the depth of any peak or valley in the training curve. A reliable growth-curve difference between the three shock-condition groups requires a significant difference in at least one of these three terms.</p>
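<p>For readers working outside <italic>R</italic>, the same growth-curve specification can be approximated in Python. The sketch below builds orthogonal polynomial predictors analogous to <italic>R</italic>&#x2019;s <italic>poly()</italic> and fits a mixed model with statsmodels; unlike <italic>lmerTest</italic>, statsmodels offers no Kenward-Roger correction, so this is only an approximation of the reported analysis, with assumed column names.</p>
<preformat>
import numpy as np
import statsmodels.formula.api as smf

def orthogonal_poly(x, degree=2):
    """Orthogonal polynomial contrasts over x, analogous to R's poly()."""
    x = np.asarray(x, dtype=float)
    X = np.vander(x - x.mean(), degree + 1, increasing=True)
    Q, _ = np.linalg.qr(X)
    return Q[:, 1:]    # drop the constant column: linear, quadratic

# df columns (assumed): accuracy, day (1..10), group, participant
# df[["ot1", "ot2"]] = orthogonal_poly(df["day"].to_numpy())
# fit = smf.mixedlm("accuracy ~ (ot1 + ot2) * group", df,
#                   groups=df["participant"]).fit()
# fit.summary()    # intercept, linear and quadratic terms by group
</preformat>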
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Effects of training session. Panel <bold>(a)</bold> shows on participant-averaged accuracy and panel <bold>(b)</bold> shows response times averaged across set-size in each of the three shock-conditions: no shock (blue), random shock (red) and performance-contingent shock (yellow). Shaded regions are between-subjects standard errors.</p>
</caption>
<graphic xlink:href="frvir-06-1623584-g004.tif">
<alt-text content-type="machine-generated">Line graphs display data from training sessions over ten days under three conditions: no shock, random, and performance-contingent. Graph (a) shows the percentage correct, increasing from about 60% to 85% across sessions. Graph (b) shows response times, decreasing from 2000 milliseconds to around 1600 milliseconds. Shaded areas represent variability in data.</alt-text>
</graphic>
</fig>
<sec id="s3-1">
<title>3.1 Training effects</title>
<p>Whether participants performed differently across training sessions was assessed by fitting linear mixed-effects models with shock condition and training session as fixed effects and set-size and participant as random effects. The Kenward-Roger degrees-of-freedom approximation (<xref ref-type="bibr" rid="B29">Kuznetsova et al., 2017</xref>) was used to calculate <italic>p</italic> values for the fixed-effect factors, and the <italic>Anova</italic> function from the package <italic>car</italic> (<xref ref-type="bibr" rid="B19">Fox and Weisberg, 2018</xref>) was used to calculate <italic>F</italic> statistics. Pairwise comparisons were conducted using the <italic>lsmeans</italic> package (<xref ref-type="bibr" rid="B31">Lenth, 2016</xref>) in <italic>R</italic> where necessary. Linear mixed-effects ANOVAs on our performance measures used Type-II Wald F tests with Kenward-Roger degrees of freedom.</p>
<sec id="s3-1-1">
<title>3.1.1 Accuracy</title>
<p>There was no main effect of shock condition on accuracy (<italic>F</italic> (2, 13) &#x3d; 0.004, <italic>p</italic> &#x3d; 0.996), suggesting that exposure to either random or performance-contingent shock did not influence overall accuracy.</p>
<p>A significant main effect of training session was observed (<italic>F</italic> (9, 754.02) &#x3d; 54.639, <italic>p</italic> &#x3c; 0.001), with participants showing improved accuracy over the 10-day training period. Post hoc comparisons indicated that these gains were driven by improvement after Day 1 (Day 1 vs. Days 2&#x2013;10, all <italic>p</italic> &#x3c; 0.001), with no significant differences among the subsequent training days (all <italic>p</italic> &#x3e; 0.05).</p>
<p>No interaction between training session and shock condition was found (<italic>F</italic> (18, 754.02) &#x3d; 1.04, <italic>p</italic> &#x3d; 0.343), suggesting that shock exposure did not modulate learning-based accuracy improvements.</p>
</sec>
<sec id="s3-1-2">
<title>3.1.2 Response times</title>
<p>Similarly, no main effect of shock condition was observed on RTs (<italic>F</italic> (2, 13) &#x3d; 0.453, <italic>p</italic> &#x3d; 0.646), and no between-group differences were detected on any testing day (all <italic>p</italic> &#x3e; 0.05).</p>
<p>A significant main effect of training session was detected (<italic>F</italic> (9, 745.01) &#x3d; 99.249, <italic>p</italic> &#x3c; 0.001), with RTs decreasing across sessions. However, a significant interaction between shock condition and training session was also found (<italic>F</italic> (18, 745.01) &#x3d; 5.181, <italic>p</italic> &#x3c; 0.001). Despite this interaction, follow-up growth curve modeling revealed no significant differences in intercept, slope, or curvature across shock groups (all <italic>p</italic> &#x3e; 0.05).</p>
<p>Contrasts examining within-group RT changes revealed that improvements occurred at different time points: performance-contingent shock participants showed gains from Day 3, no-shock participants from Day 4, and random shock participants from Day 5. Further improvements were observed late in training only in the performance-contingent and random shock groups. These patterns suggest differential timing in learning progression across shock conditions, with random shock potentially delaying early gains.</p>
</sec>
</sec>
<sec id="s3-2">
<title>3.2 Set-size effects</title>
<sec id="s3-2-1">
<title>3.2.1 Accuracy</title>
<p>Effects of set-size on accuracy are shown in <xref ref-type="fig" rid="F5">Figure 5a</xref>. A robust main effect of set-size was observed (<italic>F</italic>(4, 755) = 484.951, <italic>p</italic> &#x3c; 0.0001), with increasing set-size yielding lower accuracy. All pairwise comparisons between set-sizes showed significant differences (all <italic>p</italic> &#x3c; 0.0001), except between set-sizes 3 and 4 (<italic>p</italic> &#x3d; 0.006).</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Effects of set-size. Panel <bold>(a)</bold> shows participant-averaged accuracy and panel <bold>(b)</bold> shows response times averaged across training session in each of the three shock-conditions: no shock (blue), random shock (red) and performance-contingent shock (yellow). Shaded regions are between-subjects standard errors.</p>
</caption>
<graphic xlink:href="frvir-06-1623584-g005.tif">
<alt-text content-type="machine-generated">Graph illustrating two panels: (a) shows a decline in percentage correct with increasing set size across three conditions (No shock, Random, Performance-contingent). (b) displays response times increasing with set size for the same conditions. Both panels use color-coded lines with shaded regions to indicate variability.</alt-text>
</graphic>
</fig>
<p>A significant interaction between set-size and shock condition was found (<italic>F</italic> (8, 755) &#x3d; 2.186, <italic>p</italic> &#x3d; 0.026). However, no growth curve model differences were detected between shock groups (all <italic>p</italic> &#x3e; 0.05). Within each group, performance declined with increasing set-size, though comparisons between set-sizes 3 and 4 were non-significant in all groups (all <italic>p</italic> &#x3e; 0.05). No significant between-group accuracy differences were detected at any set-size (all <italic>p</italic> &#x3e; 0.05).</p>
</sec>
<sec id="s3-2-2">
<title>3.2.2 Response times</title>
<p>Effects of set-size on response times are shown in <xref ref-type="fig" rid="F5">Figure 5b</xref>. Response times increased significantly with set-size (<italic>F</italic> (4, 755) &#x3d; 118.530, <italic>p</italic> &#x3c; 0.0001). Planned contrasts showed significant increases for most comparisons, except those involving set-sizes 3, 4, and 8 (all <italic>p</italic> &#x3e; 0.05), suggesting performance plateaued beyond set-size 3.</p>
<p>A significant interaction between set-size and shock condition was observed (<italic>F</italic> (8, 755) &#x3d; 2.423, <italic>p</italic> &#x3d; 0.013), but again no differences in model fit were found (all <italic>p</italic> &#x3e; 0.05). Within-group contrasts showed generally increasing RTs with set-size, but differences were non-significant between specific sizes, especially beyond set-size 3. No between-group RT differences were found at any set-size (all <italic>p</italic> &#x3e; 0.05).</p>
</sec>
</sec>
<sec id="s3-3">
<title>3.3 Training &#xd7; set-size interaction</title>
<p>Mixed-effects models revealed main effects of training session on both accuracy (<italic>F</italic> (9, 727.01) &#x3d; 53.634, <italic>p</italic> &#x3c; 0.0001) and RTs (<italic>F</italic> (9, 727.01) &#x3d; 86.607, <italic>p</italic> &#x3c; 0.0001). Set-size also produced significant main effects for both accuracy (<italic>F</italic> (4, 727.00) &#x3d; 471.307, <italic>p</italic> &#x3c; 0.0001) and RTs (<italic>F</italic> (4, 727.00) &#x3d; 111.968, <italic>p</italic> &#x3c; 0.0001).</p>
<p>No significant training &#xd7; set-size interactions were found for accuracy (<italic>F</italic> (36, 727.00) &#x3d; 0.660, <italic>p</italic> &#x3d; 0.9441) or RTs (<italic>F</italic> (36, 727.00) &#x3d; 0.125, <italic>p</italic> &#x3d; 1.0), indicating that training improved performance uniformly across set-sizes.</p>
</sec>
<sec id="s3-4">
<title>3.4 Health-bar feedback</title>
<p>For the next series of analyses, we investigated whether the performance feedback visible to participants via the &#x2018;health bar&#x2019; (present throughout each training session) affected performance (see Methods for details of the health-bar feedback). <xref ref-type="fig" rid="F6">Figure 6</xref> shows the effect of health-bar level (low (&#x2264;20%), medium (21%&#x2013;80%) and high (&#x3e;80%)) on average accuracy and response times in each of the three training groups.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>Effects of visual health-bar status. Box plots showing the effect of visual health-bar performance feedback (low (&#x2264;20%), medium (21%&#x2013;80%) and high values (&#x3e;80%)) on average accuracy are shown in panel <bold>(a)</bold> and response times in panel <bold>(b)</bold> in each of the three shock groups: no shock (blue), random shock (red), and performance-contingent shock (yellow). Error bars represent between subject standard errors.</p>
</caption>
<graphic xlink:href="frvir-06-1623584-g006.tif">
<alt-text content-type="machine-generated">Box plots depicting (a) percentage correct and (b) response times across low, medium, and high health index categories. Conditions include No shock (blue), Random (pink), and Performance-contingent (yellow). Each condition shows variations in performance and response times at different health levels.</alt-text>
</graphic>
</fig>
<p>Health-bar level had a significant main effect on accuracy (<italic>F</italic> (2, 922) &#x3d; 497.380, <italic>p</italic> &#x3c; 0.0001), with higher visual feedback levels corresponding to better performance. All pairwise comparisons were significant (all <italic>p</italic> &#x3c; 0.0001).</p>
<p>No effect of health-bar level was observed on RTs (<italic>F</italic> (2, 922) &#x3d; 0.310, <italic>p</italic> &#x3e; 0.05). Shock condition did not significantly influence accuracy (<italic>F</italic> (2, 922) &#x3d; 0.001, <italic>p</italic> &#x3e; 0.05) or RTs (<italic>F</italic> (2, 922) &#x3d; 0.475, <italic>p</italic> &#x3d; 0.632), and no interactions were found between health-bar and shock condition (accuracy: <italic>F</italic> (2, 922) &#x3d; 0.326, RTs: <italic>F</italic> (2, 922) &#x3d; 0.299, both <italic>p</italic> &#x3e; 0.05).</p>
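<p>For reference, the health-bar binning used above can be reproduced from a trial-level log in a few lines, assuming health is stored as a proportion (0&#x2013;1) and reusing the hypothetical data frame from the sketch at the start of this Results section:</p>
<preformat>
import pandas as pd

# Bins: low (up to 20%), medium (21-80%), high (above 80%)
bins, labels = [-0.001, 0.20, 0.80, 1.0], ["low", "medium", "high"]
trials["health_bin"] = pd.cut(trials["health"], bins=bins, labels=labels)
acc_by_health = trials.groupby(["group", "health_bin"])["correct"].mean()
</preformat>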
</sec>
<sec id="s3-5">
<title>3.5 Response to shock delivery</title>
<p>The health bar provided performance feedback to the participant, dropping 10% following each incorrect trial and resetting after every 10th incorrect trial. Note that health-bar status was predictive and consequential only for participants in the <italic>performance-contingent</italic> shock group, who received a shock on every 10th error; it was entirely unpredictive of a shock stimulus for participants in both the <italic>no-shock</italic> and <italic>random shock</italic> groups. To determine whether there were additional effects of anticipating and/or receiving a physical shock, we conducted additional analyses evaluating performance (accuracy and response times) on the five trials immediately preceding and the five trials immediately succeeding a physical shock (<xref ref-type="fig" rid="F7">Figure 7</xref>). Given that shock was only presented to participants in the <italic>random</italic> and <italic>performance-contingent shock</italic> conditions, participants in the <italic>no-shock</italic> condition were omitted from these analyses.</p>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>Performance relative to shock delivery. Mean accuracy on the five trials preceding and succeeding a shock stimulus are shown in panel <bold>(a)</bold> and mean response times are shown in panel <bold>(b)</bold> in the random (black squares) and performance-contingent (unfilled circles) shock training groups. The vertical grey line in each figure represents the point at which a shock stimulus was presented. Error bars are between-subjects standard errors.</p>
</caption>
<graphic xlink:href="frvir-06-1623584-g007.tif">
<alt-text content-type="machine-generated">Graphs showing performance metrics relative to shock in two conditions: random and performance-contingent. Chart (a) depicts percentage correct, with higher values for random conditions. Chart (b) shows response times in milliseconds, with slight variability, and random conditions generally having quicker responses. Both charts display results across trials before and after shock application.</alt-text>
</graphic>
</fig>
<p>Analyses of the five trials before and after shock events in the random and performance-contingent groups revealed no main effect of group on accuracy or RTs (all <italic>p</italic> &#x3e; 0.05). A significant interaction between trial number and shock condition was observed for RTs (<italic>F</italic> (4, 99) &#x3d; 3.313, <italic>p</italic> &#x3d; 0.016), but no pairwise comparisons survived Bonferroni correction (all <italic>p</italic> &#x3e; 0.05).</p>
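<p>The peri-shock analysis amounts to aligning trials to each shock event. A sketch of the windowing step (numpy; names are illustrative):</p>
<preformat>
import numpy as np

def peri_shock(values, shock_idx, window=5):
    """Stack a per-trial measure over the `window` trials before and
    after each shock; returns an (n_shocks, 2*window) array."""
    rows = []
    for i in shock_idx:
        if i - window >= 0 and i + window &lt; len(values):
            rows.append(np.r_[values[i - window:i],
                              values[i + 1:i + window + 1]])
    return np.vstack(rows)

# trace = np.nanmean(peri_shock(rt_ms, shock_trials), axis=0)
</preformat>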
</sec>
<sec id="s3-6">
<title>3.6 State anxiety</title>
<p>Effects of shock condition and test time (pre- vs. post-session) on subjective stress are shown in <xref ref-type="fig" rid="F8">Figure 8</xref>. No main effects were found for shock condition (<italic>F</italic> (2, 12.99) &#x3d; 0.186, <italic>p</italic> &#x3d; 0.832) or training session (<italic>F</italic> (9, 245.01) &#x3d; 0.326, <italic>p</italic> &#x3d; 0.966) on state anxiety. A significant effect of test time was detected (<italic>F</italic> (1, 345.00) &#x3d; 22.032, <italic>p</italic> &#x3d; 0.0001), with post-training anxiety higher than pre-training anxiety.</p>
<fig id="F8" position="float">
<label>FIGURE 8</label>
<caption>
<p>Self-reported anxiety before and after training across groups. Relationship between average STAI state anxiety subscale scores obtained across training sessions for each shock-condition training group: no shock (blue), random shock (red) and performance-contingent (yellow); measured immediately prior to (solid lines) and following (dashed lines) each training session. Shaded regions represent between-subject standard errors.</p>
</caption>
<graphic xlink:href="frvir-06-1623584-g008.tif">
<alt-text content-type="machine-generated">Line graph showing STAI/State anxiety scores over 10 training sessions for six conditions: No shock Pre/Post (blue), Random Pre/Post (red), and Performance-contingent Pre/Post (yellow). The bands represent data variability.</alt-text>
</graphic>
</fig>
<p>A significant interaction between time (pre vs. post) and shock condition was found (<italic>F</italic> (2, 245.00) &#x3d; 3.359, <italic>p</italic> &#x3d; 0.036). Post hoc analyses revealed no change in the no-shock group (<italic>p</italic> &#x3d; 0.996), but significant post-training increases in anxiety for the performance-contingent (<italic>p</italic> &#x3c; 0.001) and random shock conditions (<italic>p</italic> &#x3d; 0.02).</p>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<title>4 Discussion</title>
<sec id="s4-1">
<title>4.1 Summary of aims and key findings</title>
<p>This study addressed two key questions in perceptual learning. First, we investigated whether training enhances the human visual system&#x2019;s capacity to integrate orientation information across the visual field. Second, we explored whether aversive electrodermal stimulation (used as acute stress or punishment) modulates perceptual learning in an orientation-averaging task implemented in virtual reality.</p>
<p>Our findings demonstrate (1) consistent performance degradation with increasing set-size, (2) significant training-related improvements in accuracy and response times, and (3) no evidence that training altered the slope of the set-size effect. These results suggest that training does not enhance early-stage sensory integration but likely operates at a post-integration or decisional stage. We also observed (4) no robust effects of shock condition on learning outcomes, and (5) an unexpected, systematic influence of visual performance feedback (&#x201c;health bar&#x201d;) on accuracy.</p>
</sec>
<sec id="s4-2">
<title>4.2 Visual integration and training effects</title>
<p>Whilst broadly consistent with prior work, we show for the first time in an immersive VR task that orientation-averaging performance degrades with larger set-sizes. This suggests that the visual system&#x2019;s capacity for averaging orientation information across 2D space is fundamentally constrained. Whilst training led to significant gains in accuracy and response speed, there was no interaction between training and set-size, implying that the quantity of integrated information did not increase. These gains are therefore likely to arise at decision-related stages, possibly by increasing the signal-to-noise ratio during integration or decision execution (<xref ref-type="bibr" rid="B11">Dakin et al., 2009</xref>; <xref ref-type="bibr" rid="B36">Moerel et al., 2016</xref>).</p>
<p>Our task design departed from prior orientation-averaging studies in several important respects, particularly in how local Gabor orientations were sampled on each trial. While previous studies typically draw orientations randomly from Gaussian distributions with a defined mean and standard deviation (e.g., <xref ref-type="bibr" rid="B36">Moerel et al., 2016</xref>; <xref ref-type="bibr" rid="B11">Dakin et al., 2009</xref>), we used a deterministic method to fix the global mean orientation across trials. Specifically, we first sampled all but one Gabor from a Gaussian distribution and then adjusted the final Gabor&#x2019;s orientation to ensure that the mean orientation of the array exactly matched a predefined target (e.g., &#xb1;5&#xb0; from vertical). This approach eliminated trial-by-trial variability in global mean orientation that can arise in probabilistic sampling schemes.</p>
<p>However, one potential drawback of our method is that it may have encouraged participants to adopt a &#x201c;max-rule&#x201d; strategy, relying on the most tilted element in the array rather than computing a true average. Such a strategy would introduce substantial variability in responses and lead to a higher rate of incorrect choices. If participants relied on this approach, at least on a subset of trials, task performance may have been supported in part by relational visual search mechanisms (e.g., <xref ref-type="bibr" rid="B4">Becker et al., 2025</xref>), rather than global ensemble processing. Future work could address this limitation by drawing stimulus sets from a large library of pre-generated Gaussian samples with fixed means and variances, which would preserve statistical consistency across trials without encouraging max-rule heuristics.</p>
</sec>
<sec id="s4-3">
<title>4.3 Crowding, eye movements, and generalisability</title>
<p>While visual crowding can impair averaging by reducing access to local features, we argue that crowding is unlikely to account for our set-size effects. First, the inter-Gabor spacing in our design exceeded Bouma&#x2019;s limit for crowding interference (<xref ref-type="bibr" rid="B28">Kurzawski et al., 2023</xref>; <xref ref-type="bibr" rid="B50">Van der Burg et al., 2024</xref>; <xref ref-type="bibr" rid="B7">Bouma, 1970</xref>). Second, participants were free to move their eyes, which reduces crowding by allowing individual elements to be foveated, thereby reducing their retinal eccentricity. Nonetheless, because eye movement patterns were not monitored, comparisons with fixed-eccentricity paradigms should be interpreted with caution: the distinction between early and late selection frameworks typically hinges on whether orientation averaging depends on retinotopically local filters (such as V1 neurons), which are spatially specific and sensitive to gaze position. In our task, Gabor elements were visible for extended durations, so participants may have used saccades to serially sample the array, limiting our ability to isolate the contribution of early, retinotopically specific encoding mechanisms. That said, it is noteworthy that performance consistently declined with increasing set-size, suggesting that averaging was at least partially capacity-limited despite the opportunity for foveal sampling. Future studies should address this issue by using brief stimulus presentations (&#x3c;200&#xa0;ms) to ensure that saccades cannot be used during encoding. In addition, gaze-contingent presentation could be employed to fix the eccentricity of the Gabor elements relative to gaze, enabling more direct assessment of peripheral ensemble processing. Finally, integrating eye-tracking measures would allow quantification of oculomotor strategies and their impact on orientation averaging.</p>
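<p>A back-of-envelope check supports the spacing claim, assuming the Gabor elements sat at approximately 25&#xb0; eccentricity (the annulus radius reported in Section 2.5.1) and the conventional Bouma constant of 0.5:</p>
<preformat>
import math

ecc_deg = 25.0                 # assumed element eccentricity (annulus radius)
critical = 0.5 * ecc_deg       # Bouma limit: ~12.5 deg at 25 deg eccentricity
for n in (2, 3, 4, 8):
    spacing = 2 * ecc_deg * math.sin(math.pi / n)   # nearest-neighbour chord
    print(n, round(spacing, 1), spacing &gt; critical)
# Even at set-size 8 the spacing (~19.1 deg) exceeds the ~12.5 deg limit
</preformat>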
</sec>
<sec id="s4-4">
<title>4.4 Role of attention in perceptual learning</title>
<p>Although perceptual learning can improve orientation discrimination through early-stage enhancements (<xref ref-type="bibr" rid="B34">Matthews et al., 1999</xref>; <xref ref-type="bibr" rid="B54">Zhang et al., 2010</xref>), our task introduced random variation in both Gabor location and orientation across trials. Such variation likely prevented low-level learning. Prior work shows that perceptual learning generalises across location and orientation when exogenous attentional cues are present (<xref ref-type="bibr" rid="B15">Donovan et al., 2020</xref>). In line with this, we propose that learning in our task reflects enhancements in attentional efficiency at a post-integration or decisional stage&#x2014;rather than early-stage sensory encoding.</p>
</sec>
<sec id="s4-5">
<title>4.5 Shock, stress, and perceptual learning</title>
<p>Contrary to predictions, we observed no main effect of shock condition on task accuracy or learning rate. However, a shock condition &#xd7; session interaction was observed for response times, with participants in the performance-contingent shock condition improving faster than those in the no-shock or random-shock groups. This may reflect increased urgency or motivation to avoid aversive outcomes.</p>
<p>Temporal patterns differed across groups: the performance-contingent shock condition yielded improvements by Day 3, while the other conditions lagged by 1&#x2013;2 days. These differences suggest subtle effects of punishment contingency on response preparation or motivational state, although these effects were not robust enough to affect accuracy.</p>
<p>Self-reported anxiety, as measured by the STAI, was elevated post-training in shock conditions, particularly in the performance-contingent group. Yet no correlation was found between anxiety scores and behavioural performance. This suggests that the STAI may lack sensitivity to trial-level changes or that anxiety&#x2019;s influence is indirect. Future studies should incorporate objective physiological stress markers such as salivary cortisol (<xref ref-type="bibr" rid="B25">Hellhammer et al., 2009</xref>) or pupillometry (<xref ref-type="bibr" rid="B21">Ginton et al., 2022</xref>) to clarify these relationships.</p>
</sec>
<sec id="s4-6">
<title>4.6 Unexpected role of visual feedback</title>
<p>A key and unexpected finding was that orientation-averaging accuracy varied systematically with health-bar status&#x2014;even in conditions where the health bar had no consequences (no-shock, random-shock). Higher displayed &#x201c;health&#x201d; predicted better accuracy, regardless of shock contingency. This effect was not accompanied by changes in response times, ruling out a speed-accuracy trade-off.</p>
<p>We propose that the health bar acted as a motivational or attentional cue. Visual feedback has been shown to enhance perceptual learning by directing attention and reinforcing effort (<xref ref-type="bibr" rid="B45">Seitz and Watanabe, 2003</xref>; <xref ref-type="bibr" rid="B38">Posner, 1980</xref>; <xref ref-type="bibr" rid="B1">Ahissar and Hochstein, 1993</xref>). In our task, the health bar may have operated as an implicit performance incentive, encouraging attentional persistence and error avoidance. This aligns with prior findings showing that feedback&#x2014;even when decoupled from consequence&#x2014;can shape perceptual strategies (<xref ref-type="bibr" rid="B6">Blank et al., 2013</xref>; <xref ref-type="bibr" rid="B17">Eisma et al., 2021</xref>; <xref ref-type="bibr" rid="B10">Choi and Watanabe, 2012</xref>). While some studies dispute the necessity of feedback for learning (<xref ref-type="bibr" rid="B3">Asher and Hibbard, 2020</xref>), our findings suggest that continuous visual feedback can enhance perceptual accuracy independently of external reinforcement.</p>
<p>Time-course analyses further support this view: performance did not differ immediately before and after shock events, ruling out the possibility that shock anticipation alone explains the health bar effect.</p>
</sec>
<sec id="s4-7">
<title>4.7 Limitations and future directions</title>
<p>While the present study offers novel insights into perceptual learning and the effects of aversive stimulation within a VR environment, several limitations should be acknowledged.</p>
<p>First, although our findings suggest that training-related gains occur at a post-integration stage, the possibility that eye movements may have contributed to orientation averaging performance limits our ability to draw definitive conclusions about early-stage mechanisms. Because Gabor arrays were visible until response and no gaze-contingent control was implemented, participants were free to foveate individual elements. Future studies should adopt brief (&#x3c;200&#xa0;ms) stimulus presentation times and/or gaze-contingent displays to ensure that ensemble averaging relies on peripheral processing and to isolate the contributions of early, retinotopically specific filters.</p>
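<p>A minimal Python sketch of the proposed control is shown below; it assumes a hypothetical gaze signal in degrees of visual angle and an illustrative ring of eight elements at 8&#xb0; eccentricity. Re-anchoring the array to gaze on every frame holds retinal eccentricity constant, while the exposure cap keeps presentation too brief for a saccade to land on any element.</p>
<preformat>
import numpy as np

# Assumed exposure cap: 0.2 s is brief enough that a saccade initiated
# at stimulus onset cannot land on an element before the display blanks.
MAX_EXPOSURE_S = 0.2

def gabor_positions(gaze_xy_deg, eccentricity_deg=8.0, n_elements=8):
    """Place Gabor centres on an iso-eccentric ring around the current
    gaze sample, so retinal eccentricity stays fixed as gaze moves."""
    angles = np.linspace(0.0, 2.0 * np.pi, n_elements, endpoint=False)
    ring = eccentricity_deg * np.stack([np.cos(angles), np.sin(angles)],
                                       axis=1)
    return np.asarray(gaze_xy_deg, dtype=float) + ring

# Each display frame, re-anchor the array to the latest gaze estimate
# (here a single hypothetical gaze sample):
frame_positions = gabor_positions(gaze_xy_deg=[1.5, -0.7])
print(frame_positions.shape)   # (8, 2)
</preformat>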
<p>Second, the lack of objective physiological markers of arousal limits interpretation of the observed elevation in STAI-measured anxiety following shock exposure. While the STAI is widely used, it may not capture transient fluctuations in arousal that occur on a trial-by-trial basis. Incorporating direct physiological metrics such as salivary cortisol, galvanic skin response, or pupillometry would enable more precise characterisation of stress responses and their relationship to perceptual performance.</p>
<p>Third, while our findings suggest that the health bar acts as a visual feedback signal that improves performance, we did not include a condition in which the health bar was absent. As such, although the statistical association between health bar magnitude and accuracy was robust&#x2014;even in conditions where it had no consequences&#x2014;causal inferences are constrained. Future research should include a no-health-bar control condition and manipulate the timing, visibility, or relevance of feedback to better understand its role in attentional persistence and performance modulation.</p>
<p>Finally, although our immersive VR platform enabled a more engaging and ecologically valid testing environment, it also introduced some variability in response timing and user interaction. Comparing outcomes from VR-based tasks with those obtained from conventional 2D psychophysics paradigms would help assess the generalisability of results across modalities and establish VR as a reliable platform for vision science.</p>
<p>Taken together, these limitations suggest several promising directions for future work. Larger sample sizes, objective arousal measures, constrained visual exposure, and systematic feedback manipulation will be key to refining our understanding of the interplay between motivation, stress, and perceptual learning.</p>
</sec>
</sec>
<sec sec-type="conclusion" id="s5">
<title>5 Conclusion</title>
<p>This study investigated how perceptual learning in a visual averaging task unfolds over time and under varying levels of aversive electrodermal stimulation in an immersive VR environment. We found that while training led to clear improvements in accuracy and response time, these gains did not interact with set-size, suggesting that learning likely occurred at a post-integration or decisional stage rather than through enhancements in early-stage sensory processing.</p>
<p>Contrary to expectations, shock-based stimulation&#x2014;whether performance-contingent or random&#x2014;had limited impact on task accuracy, although the performance-contingent shock condition did yield faster improvements in response time. Self-reported anxiety increased in shock groups, but this did not correlate with performance outcomes.</p>
<p>A key and unanticipated finding was that visual feedback, in the form of a health bar, strongly predicted accuracy&#x2014;even in conditions where it carried no external consequences. This suggests that persistent visual feedback may serve as an implicit motivator or attentional cue, influencing perceptual performance independently of reinforcement.</p>
<p>Taken together, our findings highlight the role of attentional and motivational mechanisms in perceptual learning and demonstrate that immersive VR platforms can be used to systematically investigate these processes. Future studies should include objective physiological measures, remove or manipulate feedback mechanisms, and compare immersive versus traditional platforms to further distinguish the factors that shape learning across visual presentation environments.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material; further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="ethics-statement" id="s7">
<title>Ethics statement</title>
<p>The studies involving humans were approved by Western Sydney University Human Research Ethics Committee. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study. Written informed consent was obtained from the individual(s) for the publication of any potentially identifiable images or data included in this article.</p>
</sec>
<sec sec-type="author-contributions" id="s8">
<title>Author contributions</title>
<p>JC: Investigation, Writing &#x2013; original draft, Conceptualization, Resources, Project administration, Formal Analysis, Supervision, Funding acquisition, Writing &#x2013; review and editing, Methodology, Validation. WHF: Methodology, Writing &#x2013; review and editing, Writing &#x2013; original draft. YL: Visualization, Formal Analysis, Writing &#x2013; original draft, Writing &#x2013; review and editing. LC: Conceptualization, Writing &#x2013; review and editing, Funding acquisition. GW: Supervision, Methodology, Writing &#x2013; original draft, Investigation, Funding acquisition, Project administration, Writing &#x2013; review and editing.</p>
</sec>
<sec sec-type="funding-information" id="s9">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research and/or publication of this article. This project was funded by a Human Performance Research Project grant (ID9104) from DSTG, awarded to JC and GW.</p>
</sec>
<sec sec-type="COI-statement" id="s10">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s11">
<title>Generative AI statement</title>
<p>The author(s) declare that no Generative AI was used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s12">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ahissar</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Hochstein</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>1993</year>). <article-title>Attentional control of early perceptual learning</article-title>. <source>Proc. Natl. Acad. Sci. U. S. A.</source> <volume>90</volume>, <fpage>5718</fpage>&#x2013;<lpage>5722</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.90.12.5718</pub-id>
<pub-id pub-id-type="pmid">8516322</pub-id>
</citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Arnsten</surname>
<given-names>A. F.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Stress signalling pathways that impair prefrontal cortex structure and function</article-title>. <source>Nat. Rev. Neurosci.</source> <volume>10</volume>, <fpage>410</fpage>&#x2013;<lpage>422</lpage>. <pub-id pub-id-type="doi">10.1038/nrn2648</pub-id>
<pub-id pub-id-type="pmid">19455173</pub-id>
</citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Asher</surname>
<given-names>J. M.</given-names>
</name>
<name>
<surname>Hibbard</surname>
<given-names>P. B.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>No effect of feedback, level of processing or stimulus presentation protocol on perceptual learning when easy and difficult trials are interleaved</article-title>. <source>Vis. Res.</source> <volume>176</volume>, <fpage>100</fpage>&#x2013;<lpage>117</lpage>. <pub-id pub-id-type="doi">10.1016/j.visres.2020.07.011</pub-id>
<pub-id pub-id-type="pmid">32836059</pub-id>
</citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Becker</surname>
<given-names>S. I.</given-names>
</name>
<name>
<surname>Hamblin-Frohman</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Amarasekera</surname>
<given-names>K. D. R.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Visual search is relational without prior context learning</article-title>. <source>Cognition</source> <volume>260</volume>, <fpage>106132</fpage>. <pub-id pub-id-type="doi">10.1016/j.cognition.2025.106132</pub-id>
<pub-id pub-id-type="pmid">40186982</pub-id>
</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Blakemore</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Nachmias</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>1971</year>). <article-title>The orientation specificity of two visual after-effects</article-title>. <source>J. Physiol.</source> <volume>213</volume>, <fpage>157</fpage>&#x2013;<lpage>174</lpage>. <pub-id pub-id-type="doi">10.1113/jphysiol.1971.sp009374</pub-id>
<pub-id pub-id-type="pmid">5575335</pub-id>
</citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Blank</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Biele</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Heekeren</surname>
<given-names>H. R.</given-names>
</name>
<name>
<surname>Philiastides</surname>
<given-names>M. G.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Temporal characteristics of the influence of punishment on perceptual decision making in the human brain</article-title>. <source>J. Neurosci.</source> <volume>33</volume>, <fpage>3939</fpage>&#x2013;<lpage>3952</lpage>. <pub-id pub-id-type="doi">10.1523/jneurosci.4151-12.2013</pub-id>
<pub-id pub-id-type="pmid">23447604</pub-id>
</citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bouma</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>1970</year>). <article-title>Interaction effects in parafoveal letter recognition</article-title>. <source>Nature</source> <volume>226</volume>, <fpage>177</fpage>&#x2013;<lpage>178</lpage>. <pub-id pub-id-type="doi">10.1038/226177a0</pub-id>
<pub-id pub-id-type="pmid">5437004</pub-id>
</citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Carrasco</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Frieder</surname>
<given-names>K. S.</given-names>
</name>
</person-group> (<year>1997</year>). <article-title>Cortical magnification neutralizes the eccentricity effect in visual search</article-title>. <source>Vis. Res.</source> <volume>37</volume>, <fpage>63</fpage>&#x2013;<lpage>82</lpage>. <pub-id pub-id-type="doi">10.1016/s0042-6989(96)00102-2</pub-id>
<pub-id pub-id-type="pmid">9068831</pub-id>
</citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Carrasco</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Evert</surname>
<given-names>D. L.</given-names>
</name>
<name>
<surname>Chang</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Katz</surname>
<given-names>S. M.</given-names>
</name>
</person-group> (<year>1995</year>). <article-title>The eccentricity effect: target eccentricity affects performance on conjunction searches</article-title>. <source>Percept. Psychophys.</source> <volume>57</volume>, <fpage>1241</fpage>&#x2013;<lpage>1261</lpage>. <pub-id pub-id-type="doi">10.3758/bf03208380</pub-id>
<pub-id pub-id-type="pmid">8539099</pub-id>
</citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Choi</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Watanabe</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Perceptual learning solely induced by feedback</article-title>. <source>Vis. Res.</source> <volume>61</volume>, <fpage>77</fpage>&#x2013;<lpage>82</lpage>. <pub-id pub-id-type="doi">10.1016/j.visres.2012.01.006</pub-id>
<pub-id pub-id-type="pmid">22269189</pub-id>
</citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dakin</surname>
<given-names>S. C.</given-names>
</name>
<name>
<surname>Bex</surname>
<given-names>P. J.</given-names>
</name>
<name>
<surname>Cass</surname>
<given-names>J. R.</given-names>
</name>
<name>
<surname>Watt</surname>
<given-names>R. J.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Dissociable effects of attention and crowding on orientation averaging</article-title>. <source>J. Vis.</source> <volume>9</volume>, <fpage>28</fpage>. <pub-id pub-id-type="doi">10.1167/9.11.28</pub-id>
<pub-id pub-id-type="pmid">20053091</pub-id>
</citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>de Voogd</surname>
<given-names>L. D.</given-names>
</name>
<name>
<surname>Hagenberg</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>Y. J.</given-names>
</name>
<name>
<surname>de Lange</surname>
<given-names>F. P.</given-names>
</name>
<name>
<surname>Roelofs</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Acute threat enhances perceptual sensitivity without affecting the decision criterion</article-title>. <source>Sci. Rep.</source> <volume>12</volume>, <fpage>9071</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-022-11664-0</pub-id>
<pub-id pub-id-type="pmid">35641536</pub-id>
</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dinse</surname>
<given-names>H. R.</given-names>
</name>
<name>
<surname>Kattenstroth</surname>
<given-names>J. C.</given-names>
</name>
<name>
<surname>Lenz</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Tegenthoff</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Wolf</surname>
<given-names>O. T.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>The stress hormone cortisol blocks perceptual learning in humans</article-title>. <source>Psychoneuroendocrinology</source> <volume>77</volume>, <fpage>63</fpage>&#x2013;<lpage>67</lpage>. <pub-id pub-id-type="doi">10.1016/j.psyneuen.2016.12.002</pub-id>
<pub-id pub-id-type="pmid">28024270</pub-id>
</citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Donovan</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Carrasco</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Endogenous spatial attention during perceptual learning facilitates location transfer</article-title>. <source>J. Vis.</source> <volume>18</volume>, <fpage>7</fpage>. <pub-id pub-id-type="doi">10.1167/18.11.7</pub-id>
<pub-id pub-id-type="pmid">30347094</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Donovan</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Shen</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Tortarolo</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Barbot</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Carrasco</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Exogenous attention facilitates perceptual learning in visual acuity to untrained stimulus locations and features</article-title>. <source>J. Vis.</source> <volume>20</volume>, <fpage>18</fpage>. <pub-id pub-id-type="doi">10.1167/jov.20.4.18</pub-id>
<pub-id pub-id-type="pmid">32340029</pub-id>
</citation>
</ref>
<ref id="B16">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Ebersole</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2016</year>). <source>Stress as a moderator of visual perception: do elevated stress levels interfere with visual cognition?</source> Master&#x2019;s thesis. <publisher-loc>Charleston, IL, United States</publisher-loc>: <publisher-name>Eastern Illinois University</publisher-name>. <comment>Available online at: <ext-link ext-link-type="uri" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="https://thekeep.eiu.edu/theses/2439">https://thekeep.eiu.edu/theses/2439</ext-link></comment>.</citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Eisma</surname>
<given-names>Y. B.</given-names>
</name>
<name>
<surname>Borst</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Paassen</surname>
<given-names>R. V.</given-names>
</name>
<name>
<surname>Winter</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Augmented visual feedback: cure or distraction?</article-title> <source>Hum. Factors</source> <volume>63</volume>, <fpage>1156</fpage>&#x2013;<lpage>1168</lpage>. <pub-id pub-id-type="doi">10.1177/0018720820924602</pub-id>
<pub-id pub-id-type="pmid">32489117</pub-id>
</citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Erickson</surname>
<given-names>J. R.</given-names>
</name>
</person-group> (<year>1970</year>). <article-title>Effects of punishment for errors on discrimination learning by humans</article-title>. <source>J. Exp. Psychol.</source> <volume>83</volume>, <fpage>112</fpage>&#x2013;<lpage>119</lpage>. <pub-id pub-id-type="doi">10.1037/h0028521</pub-id>
</citation>
</ref>
<ref id="B19">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Fox</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Weisberg</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2018</year>). <source>An R companion to applied regression</source>. <publisher-loc>Thousand Oaks, CA, United States</publisher-loc>: <publisher-name>SAGE Publications, Inc</publisher-name>.</citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gilbert</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Ito</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Kapadia</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Westheimer</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2000</year>). <article-title>Interactions between attention, context and learning in primary visual cortex</article-title>. <source>Vis. Res.</source> <volume>40</volume>, <fpage>1217</fpage>&#x2013;<lpage>1226</lpage>. <pub-id pub-id-type="doi">10.1016/s0042-6989(99)00234-5</pub-id>
<pub-id pub-id-type="pmid">10788637</pub-id>
</citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ginton</surname>
<given-names>L. M.</given-names>
</name>
<name>
<surname>Vuong</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Lake</surname>
<given-names>M. T.</given-names>
</name>
<name>
<surname>Nhapi</surname>
<given-names>R. T.</given-names>
</name>
<name>
<surname>Zar</surname>
<given-names>H. J.</given-names>
</name>
<name>
<surname>Yrttiaho</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Investigating pupillometry to detect emotional regulation difficulties in post-traumatic stress disorder</article-title>. <source>World J. Biol. Psychiatry</source> <volume>23</volume>, <fpage>127</fpage>&#x2013;<lpage>135</lpage>. <pub-id pub-id-type="doi">10.1080/15622975.2021.1935316</pub-id>
<pub-id pub-id-type="pmid">34278953</pub-id>
</citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Goldstone</surname>
<given-names>R. L.</given-names>
</name>
</person-group> (<year>1998</year>). <article-title>Perceptual learning</article-title>. <source>Annu. Rev. Psychol.</source> <volume>49</volume>, <fpage>585</fpage>&#x2013;<lpage>612</lpage>. <pub-id pub-id-type="doi">10.1146/annurev.psych.49.1.585</pub-id>
<pub-id pub-id-type="pmid">9496632</pub-id>
</citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>He</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Cavanagh</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Intriligator</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>1996</year>). <article-title>Attentional resolution and the locus of visual awareness</article-title>. <source>Nature</source> <volume>383</volume>, <fpage>334</fpage>&#x2013;<lpage>337</lpage>. <pub-id pub-id-type="doi">10.1038/383334a0</pub-id>
<pub-id pub-id-type="pmid">8848045</pub-id>
</citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Heinemann</surname>
<given-names>E. G.</given-names>
</name>
<name>
<surname>Marill</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>1954</year>). <article-title>Tilt adaptation and figural after-effect</article-title>. <source>J. Exp. Psychol.</source> <volume>48</volume>, <fpage>468</fpage>&#x2013;<lpage>472</lpage>. <pub-id pub-id-type="doi">10.1037/h0061512</pub-id>
<pub-id pub-id-type="pmid">13221743</pub-id>
</citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hellhammer</surname>
<given-names>D. H.</given-names>
</name>
<name>
<surname>Wust</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kudielka</surname>
<given-names>B. M.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Salivary cortisol as a biomarker in stress research</article-title>. <source>Psychoneuroendocrinology</source> <volume>34</volume>, <fpage>163</fpage>&#x2013;<lpage>171</lpage>. <pub-id pub-id-type="doi">10.1016/j.psyneuen.2008.10.026</pub-id>
<pub-id pub-id-type="pmid">19095358</pub-id>
</citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Herman</surname>
<given-names>J. P.</given-names>
</name>
<name>
<surname>Mcklveen</surname>
<given-names>J. M.</given-names>
</name>
<name>
<surname>Ghosal</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kopp</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Wulsin</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Makinson</surname>
<given-names>R.</given-names>
</name>
<etal/>
</person-group> (<year>2016</year>). <article-title>Regulation of the hypothalamic-pituitary-adrenocortical stress response</article-title>. <source>Compr. Physiol.</source> <volume>6</volume>, <fpage>603</fpage>&#x2013;<lpage>621</lpage>. <pub-id pub-id-type="doi">10.1002/j.2040-4603.2016.tb00694.x</pub-id>
<pub-id pub-id-type="pmid">27065163</pub-id>
</citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jones</surname>
<given-names>P. D.</given-names>
</name>
<name>
<surname>Holding</surname>
<given-names>D. H.</given-names>
</name>
</person-group> (<year>1975</year>). <article-title>Extremely long-term persistence of the McCollough effect</article-title>. <source>J. Exp. Psychol. Hum. Percept. Perform.</source> <volume>1</volume>, <fpage>323</fpage>&#x2013;<lpage>327</lpage>. <pub-id pub-id-type="doi">10.1037//0096-1523.1.4.323</pub-id>
<pub-id pub-id-type="pmid">1185119</pub-id>
</citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kurzawski</surname>
<given-names>J. W.</given-names>
</name>
<name>
<surname>Burchell</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Thapa</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Winawer</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Majaj</surname>
<given-names>N. J.</given-names>
</name>
<name>
<surname>Pelli</surname>
<given-names>D. G.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>The Bouma law accounts for crowding in 50 observers</article-title>. <source>J. Vis.</source> <volume>23</volume>, <fpage>6</fpage>. <pub-id pub-id-type="doi">10.1167/jov.23.8.6</pub-id>
<pub-id pub-id-type="pmid">37540179</pub-id>
</citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kuznetsova</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Brockhoff</surname>
<given-names>P. B.</given-names>
</name>
<name>
<surname>Christensen</surname>
<given-names>R. H. B.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>lmerTest package: tests in linear mixed effects models</article-title>. <source>J. Stat. Softw.</source> <volume>82</volume>, <fpage>1</fpage>&#x2013;<lpage>26</lpage>. <pub-id pub-id-type="doi">10.18637/jss.v082.i13</pub-id>
</citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lawrence</surname>
<given-names>D. H.</given-names>
</name>
</person-group> (<year>1949</year>). <article-title>Acquired distinctiveness of cues: I. Transfer between discriminations on the basis of familiarity with the stimulus</article-title>. <source>J. Exp. Psychol.</source> <volume>39</volume>, <fpage>770</fpage>&#x2013;<lpage>784</lpage>. <pub-id pub-id-type="doi">10.1037/h0058097</pub-id>
<pub-id pub-id-type="pmid">15398590</pub-id>
</citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lenth</surname>
<given-names>R. V.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Least-squares means: the R package lsmeans</article-title>. <source>J. Stat. Softw.</source> <volume>69</volume>, <fpage>1</fpage>&#x2013;<lpage>33</lpage>. <pub-id pub-id-type="doi">10.18637/jss.v069.i01</pub-id>
</citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Howard</surname>
<given-names>J. D.</given-names>
</name>
<name>
<surname>Parrish</surname>
<given-names>T. B.</given-names>
</name>
<name>
<surname>Gottfried</surname>
<given-names>J. A.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Aversive learning enhances perceptual and cortical discrimination of indiscriminable odor cues</article-title>. <source>Science</source> <volume>319</volume>, <fpage>1842</fpage>&#x2013;<lpage>1845</lpage>. <pub-id pub-id-type="doi">10.1126/science.1152837</pub-id>
<pub-id pub-id-type="pmid">18369149</pub-id>
</citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<collab>LTD</collab> (<year>2022</year>). <article-title>GlassesOn pupils &#x26; lenses (4.51.1340 ed.)</article-title>. <comment>Google Play Store</comment>.</citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Matthews</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Geesaman</surname>
<given-names>B. J.</given-names>
</name>
<name>
<surname>Qian</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>1999</year>). <article-title>Perceptual learning on orientation and direction discrimination</article-title>. <source>Vis. Res.</source> <volume>39</volume>, <fpage>3692</fpage>&#x2013;<lpage>3701</lpage>. <pub-id pub-id-type="doi">10.1016/s0042-6989(99)00069-3</pub-id>
<pub-id pub-id-type="pmid">10746139</pub-id>
</citation>
</ref>
<ref id="B35">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Mirman</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2014</year>). <source>Growth curve analysis and visualization using R</source>. <publisher-loc>Boca Raton, FL, United States</publisher-loc>: <publisher-name>CRC Press</publisher-name>.</citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Moerel</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Ling</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Jehee</surname>
<given-names>J. F.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Perceptual learning increases orientation sampling efficiency</article-title>. <source>J. Vis.</source> <volume>16</volume>, <fpage>36</fpage>. <pub-id pub-id-type="doi">10.1167/16.3.36</pub-id>
<pub-id pub-id-type="pmid">26913628</pub-id>
</citation>
</ref>
<ref id="B37">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Polat</surname>
<given-names>U.</given-names>
</name>
<name>
<surname>Sagi</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>1994</year>). <article-title>The architecture of perceptual spatial interactions</article-title>. <source>Vis. Res.</source> <volume>34</volume>, <fpage>73</fpage>&#x2013;<lpage>78</lpage>. <pub-id pub-id-type="doi">10.1016/0042-6989(94)90258-5</pub-id>
<pub-id pub-id-type="pmid">8116270</pub-id>
</citation>
</ref>
<ref id="B38">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Posner</surname>
<given-names>M. I.</given-names>
</name>
</person-group> (<year>1980</year>). <article-title>Orienting of attention</article-title>. <source>Q. J. Exp. Psychol.</source> <volume>32</volume>, <fpage>3</fpage>&#x2013;<lpage>25</lpage>. <pub-id pub-id-type="doi">10.1080/00335558008248231</pub-id>
<pub-id pub-id-type="pmid">7367577</pub-id>
</citation>
</ref>
<ref id="B39">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Prettyman</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Perceptual learning</article-title>. <source>Wiley Interdiscip. Rev. Cogn. Sci.</source> <volume>10</volume>, <fpage>e1489</fpage>. <pub-id pub-id-type="doi">10.1002/wcs.1489</pub-id>
<pub-id pub-id-type="pmid">30570213</pub-id>
</citation>
</ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Resnik</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Sobel</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Paz</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Auditory aversive learning increases discrimination thresholds</article-title>. <source>Nat. Neurosci.</source> <volume>14</volume>, <fpage>791</fpage>&#x2013;<lpage>796</lpage>. <pub-id pub-id-type="doi">10.1038/nn.2802</pub-id>
<pub-id pub-id-type="pmid">21552275</pub-id>
</citation>
</ref>
<ref id="B41">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rhodes</surname>
<given-names>L. J.</given-names>
</name>
<name>
<surname>Ruiz</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Rios</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Nguyen</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Miskovic</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Differential aversive learning enhances orientation discrimination</article-title>. <source>Cogn. Emot.</source> <volume>32</volume>, <fpage>885</fpage>&#x2013;<lpage>891</lpage>. <pub-id pub-id-type="doi">10.1080/02699931.2017.1347084</pub-id>
<pub-id pub-id-type="pmid">28683593</pub-id>
</citation>
</ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rideaux</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>West</surname>
<given-names>R. K.</given-names>
</name>
<name>
<surname>Rangelov</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Mattingley</surname>
<given-names>J. B.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Distinct early and late neural mechanisms regulate feature-specific sensory adaptation in the human visual system</article-title>. <source>Proc. Natl. Acad. Sci. U. S. A.</source> <volume>120</volume>, <fpage>e2216192120</fpage>. <pub-id pub-id-type="doi">10.1073/pnas.2216192120</pub-id>
<pub-id pub-id-type="pmid">36724257</pub-id>
</citation>
</ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>S&#xe4;nger</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Bechtold</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Schoofs</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Blaszkewicz</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Wascher</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>The influence of acute stress on attention mechanisms and its electrophysiological correlates</article-title>. <source>Front. Behav. Neurosci.</source> <volume>8</volume>, <fpage>353</fpage>. <pub-id pub-id-type="doi">10.3389/fnbeh.2014.00353</pub-id>
<pub-id pub-id-type="pmid">25346669</pub-id>
</citation>
</ref>
<ref id="B44">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sawaki</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Luck</surname>
<given-names>S. J.</given-names>
</name>
<name>
<surname>Raymond</surname>
<given-names>J. E.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>How attention changes in response to incentives</article-title>. <source>J. Cognitive Neurosci.</source> <volume>27</volume>, <fpage>2229</fpage>&#x2013;<lpage>2239</lpage>. <pub-id pub-id-type="doi">10.1162/jocn_a_00847</pub-id>
<pub-id pub-id-type="pmid">26151604</pub-id>
</citation>
</ref>
<ref id="B45">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Seitz</surname>
<given-names>A. R.</given-names>
</name>
<name>
<surname>Watanabe</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2003</year>). <article-title>Psychophysics: is subliminal learning really passive?</article-title> <source>Nature</source> <volume>422</volume>, <fpage>36</fpage>. <pub-id pub-id-type="doi">10.1038/422036a</pub-id>
<pub-id pub-id-type="pmid">12621425</pub-id>
</citation>
</ref>
<ref id="B46">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shalev</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Paz</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Avidan</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Visual aversive learning compromises sensory discrimination</article-title>. <source>J. Neurosci.</source> <volume>38</volume>, <fpage>2766</fpage>&#x2013;<lpage>2779</lpage>. <pub-id pub-id-type="doi">10.1523/jneurosci.0889-17.2017</pub-id>
<pub-id pub-id-type="pmid">29439168</pub-id>
</citation>
</ref>
<ref id="B47">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Spielberger</surname>
<given-names>C. D.</given-names>
</name>
<name>
<surname>Gorsuch</surname>
<given-names>R. L.</given-names>
</name>
<name>
<surname>Lushene</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Vagg</surname>
<given-names>P. R.</given-names>
</name>
<name>
<surname>Jacobs</surname>
<given-names>G. A.</given-names>
</name>
</person-group> (<year>1983</year>). <source>Manual for the state-trait anxiety inventory</source>. <publisher-loc>Palo Alto, CA</publisher-loc>: <publisher-name>Consulting Psychologists Press</publisher-name>.</citation>
</ref>
<ref id="B48">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Stegmann</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Andreatta</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Pauli</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Wieser</surname>
<given-names>M. J.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Associative learning shapes visual discrimination in a web-based classical conditioning task</article-title>. <source>Sci. Rep.</source> <volume>11</volume>, <fpage>15762</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-021-95200-6</pub-id>
<pub-id pub-id-type="pmid">34344923</pub-id>
</citation>
</ref>
<ref id="B49">
<citation citation-type="journal">
<person-group person-group-type="author">
<collab>R Core Team</collab>
</person-group> (<year>2022</year>). <article-title>R: the R project for statistical computing</article-title>. <comment>Version 4.2.1</comment>.</citation>
</ref>
<ref id="B50">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Van der Burg</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Cass</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Olivers</surname>
<given-names>C. N. L.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>A CODE model bridging crowding in sparse and dense displays</article-title>. <source>Vis. Res.</source> <volume>215</volume>, <fpage>108345</fpage>. <pub-id pub-id-type="doi">10.1016/j.visres.2023.108345</pub-id>
<pub-id pub-id-type="pmid">38142531</pub-id>
</citation>
</ref>
<ref id="B51">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Watson</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Pearson</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Wiers</surname>
<given-names>R. W.</given-names>
</name>
<name>
<surname>Le Pelley</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Prioritizing pleasure and pain: attentional capture by reward-related and punishment-related stimuli</article-title>. <source>Curr. Opin. Behav. Sci.</source> <volume>26</volume>, <fpage>107</fpage>&#x2013;<lpage>113</lpage>. <pub-id pub-id-type="doi">10.1016/j.cobeha.2018.12.002</pub-id>
</citation>
</ref>
<ref id="B52">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Whitney</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Yamanashi Leib</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Ensemble perception</article-title>. <source>Annu. Rev. Psychol.</source> <volume>69</volume>, <fpage>105</fpage>&#x2013;<lpage>129</lpage>. <pub-id pub-id-type="doi">10.1146/annurev-psych-010416-044232</pub-id>
<pub-id pub-id-type="pmid">28892638</pub-id>
</citation>
</ref>
<ref id="B53">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yehezkel</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Sterkin</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Lev</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Polat</surname>
<given-names>U.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Training on spatiotemporal masking improves crowded and uncrowded visual acuity</article-title>. <source>J. Vis.</source> <volume>15</volume>, <fpage>12</fpage>. <pub-id pub-id-type="doi">10.1167/15.6.12</pub-id>
<pub-id pub-id-type="pmid">26024459</pub-id>
</citation>
</ref>
<ref id="B54">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Xiao</surname>
<given-names>L. Q.</given-names>
</name>
<name>
<surname>Klein</surname>
<given-names>S. A.</given-names>
</name>
<name>
<surname>Levi</surname>
<given-names>D. M.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Decoupling location specificity from perceptual learning of orientation discrimination</article-title>. <source>Vis. Res.</source> <volume>50</volume>, <fpage>368</fpage>&#x2013;<lpage>374</lpage>. <pub-id pub-id-type="doi">10.1016/j.visres.2009.08.024</pub-id>
<pub-id pub-id-type="pmid">19716377</pub-id>
</citation>
</ref>
<ref id="B55">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Valsecchi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Gegenfurtner</surname>
<given-names>K. R.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>The time course of chromatic adaptation in human early visual cortex revealed by SSVEPs</article-title>. <source>J. Vis.</source> <volume>23</volume>, <fpage>17</fpage>. <pub-id pub-id-type="doi">10.1167/jov.23.5.17</pub-id>
<pub-id pub-id-type="pmid">37223943</pub-id>
</citation>
</ref>
<ref id="B56">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhao</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>A. S.</given-names>
</name>
<name>
<surname>Beams</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Badano</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Spatiotemporal image quality of virtual reality head mounted displays</article-title>. <source>Sci. Rep.</source> <volume>12</volume>, <fpage>20235</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-022-24345-9</pub-id>
<pub-id pub-id-type="pmid">36424434</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>