<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Hum. Neurosci.</journal-id>
<journal-title>Frontiers in Human Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Hum. Neurosci.</abbrev-journal-title>
<issn pub-type="epub">1662-5161</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnhum.2023.1255465</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Human Neuroscience</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>EasyEyes &#x2014; A new method for accurate fixation in online vision testing</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Kurzawski</surname>
<given-names>Jan W.</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn0004"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2373298/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Pombo</surname>
<given-names>Maria</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn0004"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2354111/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Burchell</surname>
<given-names>Augustin</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Hanning</surname>
<given-names>Nina M.</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/554363/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Liao</surname>
<given-names>Simon</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2543861/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Majaj</surname>
<given-names>Najib J.</given-names>
</name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2225821/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Pelli</surname>
<given-names>Denis G.</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/17190/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Department of Psychology, New York University</institution>, <addr-line>New York, NY</addr-line>, <country>United States</country></aff>
<aff id="aff2"><sup>2</sup><institution>Institut f&#x00FC;r Psychologie, Humboldt Universit&#x00E4;t zu Berlin</institution>, <addr-line>Berlin</addr-line>, <country>Germany</country></aff>
<aff id="aff3"><sup>3</sup><institution>Center for Neural Science, New York University</institution>, <addr-line>New York, NY</addr-line>, <country>United States</country></aff>
<author-notes>
<fn fn-type="edited-by" id="fn0005">
<p>Edited by: Rebecca J. Hirst, University of Nottingham, United Kingdom</p>
</fn>
<fn fn-type="edited-by" id="fn0006">
<p>Reviewed by: Matteo Toscani, Bournemouth University, United Kingdom; John A. Greenwood, University College London, United Kingdom</p>
</fn>
<corresp id="c001">&#x002A;Correspondence: Denis G. Pelli, <email>denis.pelli@nyu.edu</email></corresp>
<fn fn-type="equal" id="fn0004">
<p><sup>&#x2020;</sup>These authors have contributed equally to this work and share first authorship</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>29</day>
<month>11</month>
<year>2023</year>
</pub-date>
<pub-date pub-type="collection">
<year>2023</year>
</pub-date>
<volume>17</volume>
<elocation-id>1255465</elocation-id>
<history>
<date date-type="received">
<day>08</day>
<month>07</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>31</day>
<month>10</month>
<year>2023</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2023 Kurzawski, Pombo, Burchell, Hanning, Liao, Majaj and Pelli.</copyright-statement>
<copyright-year>2023</copyright-year>
<copyright-holder>Kurzawski, Pombo, Burchell, Hanning, Liao, Majaj and Pelli</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>Online methods allow testing of larger, more diverse populations, with much less effort than in-lab testing. However, many psychophysical measurements, including visual crowding, require accurate eye fixation, which is classically achieved by testing only experienced observers who have learned to fixate reliably, or by using a gaze tracker to restrict testing to moments when fixation is accurate. Alas, both approaches are impractical online as online observers tend to be inexperienced, and online gaze tracking, using the built-in webcam, has a low precision (&#x00B1;4&#x2009;deg). EasyEyes open-source software reliably measures peripheral thresholds online with accurate fixation achieved in a novel way, without gaze tracking. It tells observers to use the cursor to track a moving crosshair. At a random time during successful tracking, a brief target is presented in the periphery. The observer responds by identifying the target. To evaluate EasyEyes fixation accuracy and thresholds, we tested 12 naive observers in three ways in a counterbalanced order: first, in the laboratory, using gaze-contingent stimulus presentation; second, in the laboratory, using EasyEyes while independently monitoring gaze using EyeLink 1000; third, online at home, using EasyEyes. We find that crowding thresholds are consistent and individual differences are conserved. The small root mean square (RMS) fixation error (0.6&#x2009;deg) during target presentation eliminates the need for gaze tracking. Thus, this method enables fixation-dependent measurements online, for easy testing of larger and more diverse populations.</p>
</abstract>
<kwd-group>
<kwd>fixation</kwd>
<kwd>online testing</kwd>
<kwd>eye tracker</kwd>
<kwd>crowding</kwd>
<kwd>EasyEyes</kwd>
<kwd>crosshair tracking</kwd>
<kwd>gaze control</kwd>
</kwd-group>
<counts>
<fig-count count="5"/>
<table-count count="2"/>
<equation-count count="0"/>
<ref-count count="69"/>
<page-count count="12"/>
<word-count count="9223"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Brain Health and Clinical Neuroscience</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<title>Introduction</title>
<p>Online data collection offers researchers immediate access to thousands of participants around the world, which speeds up research and allows for more diverse samples (<xref ref-type="bibr" rid="ref48">Palan and Schitter, 2018</xref>; <xref ref-type="bibr" rid="ref17">Grootswagers, 2020</xref>). However, for researchers conducting visual fixation-dependent experiments, the appeal of online testing is frustrated by the inability to track gaze precisely. This is especially important when stimuli are presented in the periphery.</p>
<p>In peripheral testing, observers are torn between fixating on the central crosshair and looking toward the anticipated target location, which we call &#x201C;peeking&#x201D; (<xref ref-type="bibr" rid="ref32">Kurzawski et al., 2023</xref>). If observers fixate on the anticipated location of the peripheral target, target eccentricity is almost zero, defeating the purpose of peripheral testing. In-lab eye tracking is widely used to ensure fixation. Typically, infrared light emitted by the eye tracker creates a reflection in the cornea of the eye, which is picked up by an infrared camera (<xref ref-type="bibr" rid="ref12">Eye Tracking 101, 2022</xref>). Thus, eye trackers can precisely report the eye&#x2019;s location and movement at any point in time. However, precise eye trackers are generally expensive, cumbersome, and require calibration. Importantly, they limit the study of fixation-dependent experiments to laboratory settings.</p>
<p>Precise gaze control is not available for online testing yet. Even though many researchers have devised tools and methods to gather eye-tracking data using participants&#x2019; webcams (<xref ref-type="bibr" rid="ref69">Xu et al., 2015</xref>; <xref ref-type="bibr" rid="ref24">Huang et al., 2016</xref>; <xref ref-type="bibr" rid="ref65">Valliappan et al., 2020</xref>), many of these tools still require calibration. An exception is <italic>WebGazer.js</italic> (<xref ref-type="bibr" rid="ref50">Papoutsaki et al., 2016</xref>), a prominent auto-calibrated eye-tracking tool that relies on the webcam to estimate the participant&#x2019;s gaze. Researchers have shown its effectiveness for various tasks (<xref ref-type="bibr" rid="ref57">Semmelmann and Weigelt, 2018</xref>; <xref ref-type="bibr" rid="ref58">Slim and Hartsuiker, 2022</xref>). Nevertheless, in the best-case scenario, its spatial accuracy is approximately 4&#x2009;deg, which would introduce a&#x2009;&#x00B1;&#x2009;40% error in the eccentricity of a target at 10&#x2009;deg eccentricity (<xref ref-type="bibr" rid="ref49">Papoutsaki, 2015</xref>; <xref ref-type="bibr" rid="ref24">Huang et al., 2016</xref>).</p>
<p>Ample research on eccentricity-based and polar angle-based differences in perception (see <xref ref-type="bibr" rid="ref62">Strasburger et al., 2011</xref>; <xref ref-type="bibr" rid="ref23">Himmelberg et al., 2023</xref> for reviews) relies on stable central eye fixation (<xref ref-type="bibr" rid="ref19">Guzman-Martinez et al., 2009</xref>). Visual crowding experiments are well-known fixation-dependent psychophysical tasks. Crowding, or the failure to recognize an object due to clutter, is typically measured by asking participants to recognize a letter between two flankers (<xref ref-type="bibr" rid="ref63">Stuart and Burian, 1962</xref>; <xref ref-type="bibr" rid="ref4">Bouma, 1973</xref>; <xref ref-type="bibr" rid="ref51">Pelli et al., 2004</xref>; <xref ref-type="bibr" rid="ref52">Pelli and Tillman, 2008</xref>; <xref ref-type="bibr" rid="ref61">Strasburger, 2020</xref>). <italic>Crowding distance</italic> (&#x201C;critical spacing&#x201D;) is the center-to-center distance from target to flanker that achieves a criterion level of performance. It increases with eccentricity, and thus, crowding is generally measured in the periphery (<xref ref-type="bibr" rid="ref3">Bouma, 1970</xref>; <xref ref-type="bibr" rid="ref64">Toet and Levi, 1992</xref>; <xref ref-type="bibr" rid="ref30">Kooi et al., 1994</xref>; <xref ref-type="bibr" rid="ref51">Pelli et al., 2004</xref>; <xref ref-type="bibr" rid="ref37">Levi and Carney, 2009</xref>).</p>
<p>Crowding varies 2-fold across observers (<xref ref-type="bibr" rid="ref54">Pelli et al., 2016</xref>; <xref ref-type="bibr" rid="ref32">Kurzawski et al., 2023</xref>) and little within an observer for a given eccentricity and polar angle across sessions (<xref ref-type="bibr" rid="ref7">Chung, 2007</xref>; <xref ref-type="bibr" rid="ref32">Kurzawski et al., 2023</xref>). Clinically, it plays a key role in amblyopia (<xref ref-type="bibr" rid="ref38">Levi et al., 2007</xref>) and exacerbates the effects of macular degeneration (<xref ref-type="bibr" rid="ref66">Wallace et al., 2017</xref>). It correlates with dyslexia and thus may be a valuable biomarker to guide early interventions designed to diminish problems in decoding letters and words (<xref ref-type="bibr" rid="ref36">Levi, 2008</xref>; <xref ref-type="bibr" rid="ref26">Joo et al., 2018</xref>; <xref ref-type="bibr" rid="ref39">Li et al., 2020</xref>). For crowding to fulfill its promise as a biomarker, accurate target eccentricity when testing is required. We have previously shown that measured crowding distance depends on fixation accuracy and inaccurate fixation impacts the mean and standard deviation of measured crowding thresholds (<xref ref-type="bibr" rid="ref32">Kurzawski et al., 2023</xref>). One way to avoid inaccurate fixation is gaze-contingent stimulus presentation that here we call &#x201C;awaited fixation&#x201D;: While monitoring gaze with an eye tracker, the stimulus only appears after the observer has accurately fixated for 250&#x2009;ms. Unfortunately, online gaze tracking is not accurate enough to use this method.</p>
<p>Here, we demonstrate how EasyEyes,<xref ref-type="fn" rid="fn0001"><sup>1</sup></xref> an open-source online psychophysical testing tool, measures crowding thresholds reliably by achieving accurate fixation with a fine motor task and without eye tracking.</p>
<p>Researchers have shown that cursor movement generally correlates with eye movement (<xref ref-type="bibr" rid="ref6">Chen et al., 2001</xref>; <xref ref-type="bibr" rid="ref40">Liebling and Dumais, 2014</xref>). Moreover, looking at a target is required for precise and accurate hand movements (<xref ref-type="bibr" rid="ref25">Jana et al., 2017</xref>), and fixations are necessary when coordinating the movement between two objects (<xref ref-type="bibr" rid="ref35">Land and Hayhoe, 2001</xref>). With EasyEyes, observers perform the fine motor task of tracking a moving crosshair with their cursor. The peripheral target is presented after successful crosshair tracking for a random time of 0.75&#x2013;1.25&#x2009;s. This eye&#x2013;hand coordination task demands accurate fixation before target onset.</p>
<p>We compare thresholds between an at-home online crowding task (EasyEyes home), an in-lab version of the same online task (EasyEyes lab), and a previously validated crowding in-lab task (CriticalSpacing.m lab, <xref ref-type="bibr" rid="ref54">Pelli et al., 2016</xref>). We find that online EasyEyes crowding thresholds do not significantly differ from those measured in the laboratory. Additionally, we use gaze tracking while observers complete EasyEyes in the lab to validate that observers fixate on the moving crosshair during target presentation and do not peek.</p>
</sec>
<sec sec-type="methods" id="sec2">
<title>Methods</title>
<sec id="sec3">
<title>Observers</title>
<p>Twelve observers took part in our experiment. Seven identified as female and five as male. Their ages ranged from 21 to 46 (<italic>M</italic> =&#x2009;27.3, <italic>SD</italic> =&#x2009;6.8). All observers were fluent English speakers and had normal or corrected-to-normal vision. Importantly, observers were recruited via a convenience sample, ensuring that they had little to no experience with crowding tasks. Two-thirds of the observers were associated with the psychology department of New York University (graduate students, postdocs, and staff), but had no experience with vision psychophysical tasks. All observers gave informed consent in accordance with the Declaration of Helsinki and were compensated $15/h for their participation. This experiment was approved by the New York University Committee on Activities Involving Human Subjects (UCAIHS; IRB-FY2016-404).</p>
<p>All observers completed a visual crowding task in three ways: 1. CriticalSpacing.m (in lab), 2. EasyEyes (in lab), and 3. EasyEyes (at home). The order of the three ways was counterbalanced across observers to cancel out any order effects.</p>
</sec>
<sec id="sec4">
<title>Way 1: CriticalSpacing.m in lab</title>
<p>CriticalSpacing.m (<xref ref-type="bibr" rid="ref54">Pelli et al., 2016</xref>) is a thoroughly tested MATLAB program for measuring crowding thresholds, recently enhanced by the addition of a chin rest and gaze-contingent display (<xref ref-type="bibr" rid="ref32">Kurzawski et al., 2023</xref>). The target (with flankers) is presented when gaze is detected within 1.5&#x2009;deg of the crosshair by an EyeLink 1000 eye tracker. Trials are retained only if fixation remains within 1.5&#x2009;deg of the crosshair throughout the target duration. Using this method, <xref ref-type="bibr" rid="ref32">Kurzawski et al. (2023)</xref> report extensive crowding measurements on 50 observers.</p>
</sec>
<sec id="sec5">
<title>Way 2: EasyEyes in lab</title>
<p>Observers used the same chin rest, and EasyEyes online software measured the crowding threshold while the EyeLink 1000 independently monitored gaze. EasyEyes software had no access to the gaze data. An EasyEyes log recorded a time stamp in absolute POSIX time (in fractions of a second), and the crosshair, cursor, and target position every frame (60&#x2009;Hz). A MATLAB program running in parallel saved a POSIX timestamp and gaze position every 10&#x2009;ms.</p>
</sec>
<sec id="sec6">
<title>Way 3: EasyEyes at home</title>
<p>Each observer opened the URL of the EasyEyes experiment in a browser on their own computer and ran the experiment online.</p>
<p>Observers who initially completed CriticalSpacing.m in the laboratory or EasyEyes in the laboratory may have inferred that the task required strict fixation, potentially biasing their subsequent fixation performance on EasyEyes at home. We therefore counterbalanced the order in which observers completed the conditions.</p>
</sec>
<sec id="sec7">
<title>Identification task</title>
<p>In all testing methods, observers completed a simple letter recognition task that measures crowding in the visual periphery. In each trial, the observer is presented with a trigram of letters for 150&#x2009;ms. We refer to the middle letter as the target and the other two as the flankers. For each trial, target and flankers are drawn randomly, without replacement, from a nine-letter character set: DHKNORSVZ. Letters are rendered in black in the Sloan font on a uniform white background of approximately 275&#x2009;cd/m<sup>2</sup> (<xref ref-type="bibr" rid="ref59">Sloan et al., 1952</xref>; <xref ref-type="bibr" rid="ref54">Pelli et al., 2016</xref>). We omit the C from Louise Sloan&#x2019;s original 10 letters because it is too easily confused with the O (<xref ref-type="bibr" rid="ref10">Elliott et al., 1990</xref>). The Sloan letters all have the same square (invisible) bounding box. The target letter is presented so that its center is either &#x2212;10&#x2009;deg or&#x2009;+&#x2009;10&#x2009;deg from the fixation crosshair along the horizontal midline. The flankers are presented symmetrically, to the right and left of the target. The spacing, center of target to center of each flanker, varies from trial to trial, guided by QUEST. After the brief presentation, the list of nine possible letters is displayed, and the observer is asked to identify the target by clicking (or typing, in the case of the EasyEyes home session) one of the nine letters displayed. Only the valid characters (nine Sloan letters) are accepted as responses, and any other keypress is ignored.</p>
<p>As our observers were naive to the task, they completed a brief (2&#x2013;3&#x2009;min) online training session which consisted of 10 trials, 5 at each of &#x00B1;10&#x2009;deg of eccentricity, prior to any session.</p>
</sec>
<sec id="sec8">
<title>Measuring threshold</title>
<p>In each block, we use QUEST (<xref ref-type="bibr" rid="ref67">Watson and Pelli, 1983</xref>) to control the letter spacing of each trial and finally estimate the crowding distance threshold. Each threshold estimate was based on 35 trials. Each block of trials interleaved two conditions, one for &#x2212;10&#x2009;deg and another for +10&#x2009;deg (resulting in 35 trials per condition and 70 trials per block). Each participant completed two blocks in each session (140 trials per session). Letter size scales with spacing, maintaining a fixed ratio of 1.4:1 (in EasyEyes, <italic>spacingOverSizeRatio</italic> =&#x2009;1.4). Threshold was defined as the letter spacing for 70% correct identification, and QUEST assumes a Weibull function. In EasyEyes, we specify <italic>thresholdProportionCorrect</italic> as 70, <italic>thresholdParameter</italic> as &#x201C;spacing,&#x201D; and <italic>thresholdGuess</italic> to be 3. The remaining threshold parameters retained their default values (<italic>thresholdDelta</italic> is 0.01, <italic>thresholdBeta</italic> is 2.3, and <italic>thresholdGamma</italic> is 0.5). These values match those of CriticalSpacing.m (<xref ref-type="bibr" rid="ref32">Kurzawski et al., 2023</xref>).</p>
<p>The conditions for target presentation differ depending on the experimental software. CriticalSpacing.m uses gaze-contingent stimulus presentation while EasyEyes relies on crosshair tracking. These are described below.</p>
</sec>
<sec id="sec9">
<title>Gaze-contingent CriticalSpacing.m</title>
<p>We measured crowding thresholds using CriticalSpacing.m (<xref ref-type="bibr" rid="ref54">Pelli et al., 2016</xref>) with additional features that integrated compatibility with the EyeLink eye tracker. This enhanced CriticalSpacing.m uses gaze-contingent stimulus presentation that we call &#x201C;awaited fixation&#x201D; (<xref ref-type="bibr" rid="ref32">Kurzawski et al., 2023</xref>). At the start of the experiment, a central crosshair is shown on the screen. The first trial begins when the observer presses the spacebar. After correct fixation for 250&#x2009;ms, a letter trigram is displayed for 150&#x2009;ms. After the stimulus offset, the observer uses a mouse to click to report the middle letter of the trigram. All possible letters appear in a row below fixation. After clicking on the letter, the observers are instructed to look back at the central crosshair. A correct response is acknowledged with a short beep. Subsequently, the computer waits for the observer to maintain fixation within 1.5&#x2009;deg of the crosshair for 250&#x2009;ms. If the waiting period exceeds 10&#x2009;s, software prompts for recalibration of the gaze tracker.</p>
</sec>
<sec id="sec10">
<title>Apparatus</title>
<p>In the laboratory, observers used a chin rest to maintain a 40&#x2009;cm viewing distance from eye to display. To track gaze in the laboratory, we used an EyeLink 1000 eye tracker (SR Research, Ottawa, Ontario, Canada) with a sampling rate of 1,000&#x2009;Hz. To allow for a short viewing distance, we used their Tower mount setup with a 25-mm lens.</p>
<p>Each in-lab session was completed with an Apple iMac 27&#x2033; with an external monitor for stimulus presentation. The screen resolution was 5,120&#x2009;&#x00D7;&#x2009;2,880. Apple iMac has AMD graphics for optimal compatibility with Psychtoolbox imaging software. The Systems Preference: Displays: Brightness slider was set (by calling MacDisplaySettings.m in the Psychtoolbox) to 0.86 (range 0 to 1) to achieve a white background luminance of approximately 275&#x2009;cd/m<sup>2</sup>. The observer viewed the screen binocularly. Stimuli were rendered using <italic>CriticalSpacing</italic>.<italic>m</italic> software (<xref ref-type="bibr" rid="ref54">Pelli et al., 2016</xref>) implemented in MATLAB 2021 using the Psychtoolbox (<xref ref-type="bibr" rid="ref5">Brainard, 1997</xref>).</p>
</sec>
<sec id="sec11">
<title>EasyEyes</title>
<p>EasyEyes (see text footnote 1) is open-access software to measure thresholds online. With a Pavlovia<xref ref-type="fn" rid="fn0002"><sup>2</sup></xref> account, the scientist can upload an experiment table with an alphabetical list of parameters along with corresponding files (consent forms and fonts) to the EasyEyes website and obtain an experiment link. EasyEyes integrates Prolific<xref ref-type="fn" rid="fn0003"><sup>3</sup></xref> to allow scientists to easily recruit paid participants from all over the world. After participants complete the experiment, EasyEyes provides easy access to the data as well as tools for data analysis and visualization.</p>
<p>EasyEyes has 305 parameters that allow scientists flexibility to include questionnaires and measure various variables, including reading speed and accuracy, visual acuity, and hearing audiogram. EasyEyes uses the &#x201C;virtual chinrest&#x201D; method of <xref ref-type="bibr" rid="ref39">Li et al. (2020)</xref> to measure screen size and viewing distance and uses Google FaceMesh (<xref ref-type="bibr" rid="ref27">Kartynnik et al., 2019</xref>) to continuously track viewing distance throughout the experiment.</p>
</sec>
<sec id="sec12">
<title>Experimental design</title>
<p>For the EasyEyes version of the letter identification task, we implement the CriticalSpacing.m task described above as closely as possible. Our spreadsheet specifies 3 blocks of two target tasks: one <italic>questionAndAnswer</italic> block (that asks observers for their participant ID and age) and two <italic>identify</italic> blocks. Each <italic>identify</italic> block has two interleaved conditions of 35 trials each. The only difference between the conditions is whether the target position is specified at &#x00B1;10&#x2009;deg (<italic>targetEccentricityXDeg</italic> = 10 or&#x2009;&#x2212;&#x2009;10 and <italic>targetEccentricityYDeg</italic> = 0). In this way, each block calculates two thresholds, one for the right and one for the left meridian. We specify the threshold criterion proportion correct (70%), the viewing distance (40&#x2009;cm), and the stimulus presentation time (0.15&#x2009;s) using the <italic>thresholdProportionCorrect</italic>, <italic>viewingDistanceDesiredCm</italic>, and <italic>targetDurationSec</italic> parameters, respectively. We specify <italic>targetKind</italic> to be &#x201C;letter,&#x201D; <italic>spacingDirection</italic> to be &#x201C;radial,&#x201D; <italic>spacingRelationToSize</italic> to be &#x201C;ratio,&#x201D; and <italic>spacingSymmetry</italic> to be &#x201C;screen.&#x201D; We also provide software with the WOFF2 file of the Sloan font and indicate it as such using the <italic>font</italic>, <italic>fontSource</italic>, and <italic>fontCharacterSet</italic> parameters.</p>
<p>There are three differences between the at-home and in-lab EasyEyes experiments. First, the in-lab version sets the <italic>_trackGazeExternallyBool</italic> parameter to TRUE to save a log of timestamped screen locations of the crosshair, cursor, and (when present) target. Second, the at-home experiment requires observers to calibrate their screen size and viewing distance as described above. Finally, in the at-home experiment, the <italic>viewingDistanceNudgingBool</italic> parameter is set to TRUE so observers are told to move farther away from or closer to the screen if their viewing distance is less than 80% or greater than 120% of the specified 40&#x2009;cm.</p>
</sec>
<sec id="sec13">
<title>Moving crosshair</title>
<p>Traditionally, many vision experiments ask observers to fix their gaze on a static fixation mark, which is often a crosshair. Naive observers struggle to hold their gaze on a central mark while anticipating a peripheral target. Instead, EasyEyes tells the observer to use the cursor to track a moving crosshair.</p>
<p>Each trial presents a moving black crosshair consisting of a vertical and a horizontal line crossing at their midpoints, each 2&#x2009;deg long and 0.05&#x2009;deg thick. The crosshair has an invisible &#x201C;hotspot&#x201D; disk with a 0.1&#x2009;deg radius about its center location. Until stimulus presentation, the crosshair moves steadily, counterclockwise, along an invisible circular trajectory centered on the screen center, with a radius of 0.5&#x2009;deg and a period of 10&#x2009;s, resulting in a speed of 0.3&#x2009;deg/s. The initial position of the crosshair is a random point on the circular path. The observer is told to use the cursor to track the center of the crosshair. Tracking is considered successful while the cursor tip is in the hotspot, and the crosshair becomes &#x201C;bold&#x201D; (increasing line thickness from 0.05 to 0.07&#x2009;deg) while tracking is successful. This feedback helps the participant to quickly learn tracking.</p>
<p>In each trial, the tracking period is a small fraction of the circle. If the observer is tracking continuously, the average tracking duration is 1&#x2009;s, which corresponds to 9% of the circle. The arc angle of the path is so small that the path curvature is hardly noticeable. In a limited exploration of hotspot radius, crosshair speed, and radius of curvature of crosshair movement, we settled on a hotspot radius of 0.1&#x2009;deg (which yields excellent overall performance, RMSE &#x003C;0.1&#x2009;deg), a crosshair speed of 0.3&#x2009;deg/s, and a radius of 0.5&#x2009;deg. We explored different values but found that observers became frustrated by smaller radii or higher speeds. Similarly, an unpredictable &#x201C;random walk&#x201D; might produce more precise fixation, but would similarly frustrate observers or steal attention from the target. More investigation of these parameters is warranted.</p>
</sec>
<sec id="sec14">
<title>Coordinates</title>
<p>This study uses two spatial coordinate systems to specify stimulus and gaze position: <italic>Screen coordinates</italic> (pix) are X and Y pixels, with the origin in the upper left corner of the screen (and y increases down). <italic>Visual coordinates</italic> (deg) are X and Y gaze positions relative to the current location of the crosshair (and y increases up). The target is presented at 10&#x2009;deg left or right of the crosshair center. (Stimulus display software needs to convert back and forth between these coordinate systems. The <xref ref-type="supplementary-material" rid="SM1">Supplementary material</xref> provides these routines in MATLAB and JavaScript.)</p>
</sec>
<sec id="sec15">
<title>Pre-stimulus interval</title>
<p>Before the first trial, participants received verbal instruction that they should use a mouse or trackpad to track the moving cross and should identify the middle letter presented in the periphery. At the start of each trial, EasyEyes displays the instruction in the upper left corner of the screen, &#x201C;Ready? Track the center of the crosshair until the letter(s) appear.&#x201D; (In order to avoid ambiguity in what is meant by &#x201C;tracking,&#x201D; we have since changed the instructions to &#x201C;Ready? Use the cursor to track the center of the crosshair until the target appears. The crosshair becomes bold when the cursor is on it.&#x201D;) We hypothesized that keeping the cursor near the crosshair forces the observer to fixate near the crosshair. Meanwhile, EasyEyes displays the cursor and moving crosshair, and checks the cursor position every frame until the cursor tip is in the crosshair hotspot; then, it starts a timer, which waits for a duration randomly sampled from a uniform probability interval of 0.75 to 1.25&#x2009;s. EasyEyes checks the cursor position again at the end of the (unannounced) tracking interval. If the cursor tip is in the hotspot then EasyEyes presents the target. Otherwise, EasyEyes restarts the timer, without disturbing the crosshair&#x2019;s motion. (Since collecting the data presented here, we made the cursor-tracking criterion more stringent. Now the cursor is checked repeatedly during the tracking interval and any exit from the hot spot causes EasyEyes to go back to the start.)</p>
</sec>
<sec id="sec16">
<title>Stimulus and response</title>
<p>The target (with flankers) is displayed immediately, for 150&#x2009;ms. During the target presentation, the crosshair and cursor are hidden. 700&#x2009;ms after the stimulus offset (specified by <italic>targetSafetyMarginSec</italic>), the following instructions appear: &#x201C;Please identify the middle letter by clicking it below or pressing it on the keyboard.&#x201D;</p>
</sec>
<sec id="sec17">
<title>Monitoring gaze while testing with EasyEyes</title>
<p>EasyEyes does not have gaze tracking. We use an in-lab EyeLink 1000 eye tracker to assess the accuracy of fixation during crosshair tracking by the cursor. A simple handshake between EasyEyes and MATLAB (controlling the EyeLink 1000) tells MATLAB when the experiment begins and ends. The handshake uses a RESTful API to a Node.js server. EasyEyes sends a start command by making a POST request to the RESTful server. Meanwhile, MATLAB repeatedly sends GET requests to the server until it receives the &#x201C;start&#x201D; command from EasyEyes. For each display frame (60&#x2009;Hz), EasyEyes records the POSIX (absolute time in fractions of a second) timestamp, and the X and Y screen positions of the crosshair, cursor, and target. MATLAB receives X and Y gaze position from the EyeLink every 10&#x2009;ms and records it along with a POSIX timestamp. This produces two timestamped CSV files, one from MATLAB and one from EasyEyes, which were combined to generate the plots seen here.</p>
</sec>
<sec id="sec18">
<title>RMSE of gaze and cursor position</title>
<p>We estimated RMSE by calculating the radial distance between either cursor and crosshair positions (tracking error) or gaze and crosshair positions (gaze pursuit error) for each frame of the stimulus presentation. These errors were averaged within and across trials to produce RMS errors per observer. In both cases (tracking and gaze pursuit errors), we report the mean error across observers.</p>
</sec>
<sec id="sec19">
<title>Correction of eye tracker calibration offset in X-Y gaze position</title>
<p>The eye tracker is calibrated once before the session by asking the observer to fixate in the center and at 5&#x2009;deg above, below, right, and left from the center. We find that the center of the screen is reported with a small consistent offset unique to each observer session. We estimated and removed this offset. A correction was determined independently for each observer session by calculating the mean X and Y offset between crosshair and recorded gaze position across all gaze samples obtained during the 750&#x2009;ms interval before stimulus onset (75 gaze samples per trial, and 140 trials). This single offset correction was applied to every gaze position of the observer in that session. Across observers, the mean&#x2009;&#x00B1;&#x2009;SD RMS radial offset was 0.64&#x2009;&#x00B1;&#x2009;0.25&#x2009;deg.</p>
</sec>
<sec id="sec20">
<title>Statistical analysis of crowding thresholds</title>
<p>Test&#x2013;retest correlation was assessed between log crowding distances. We also calculate the test&#x2013;retest variance (which is reported as SD) as the square root of mean variance across observers. To evaluate the difference between methods, we conducted a one-way ANOVA with log crowding distance as the dependent variable and method (CriticalSpacing.m lab, EasyEyes lab, and EasyEyes home) as the independent variable and calculated a corresponding Bayes Factor using the anovaBF() function of the BayesFactor package in R (<xref ref-type="bibr" rid="ref44">Morey et al., 2023</xref>). Furthermore, we evaluate the difference in log crowding threshold variance with pairwise F-tests for equal variance. To assess whether individual differences are conserved across methods, we compute each observer&#x2019;s geometric mean threshold (4 thresholds: left and right, test and retest) and calculate the Pearson&#x2019;s correlation coefficient for all pairs of methods. To assess how well observer differences are conserved we computed Pearson&#x2019;s correlations (of the geometric mean across left and right meridians) across methods and across test and retest within each method. For example, to compare CriticalSpacing.m lab to EasyEyes home, we correlate the test thresholds in the former to the retest thresholds in the latter, and vice versa. We also calculate the intraclass correlation coefficient across all three methods. All analyses were conducted in R (version 4.2.3) using R Studio.</p>
</sec>
</sec>
<sec sec-type="results" id="sec21">
<title>Results</title>
<sec id="sec22">
<title>Crowding thresholds agree across test&#x2013;retest</title>
<p>We measured radial crowding thresholds in 12 observers on the right and left meridian at 10&#x2009;deg of eccentricity using three experimental methods (EasyEyes home, EasyEyes lab, and CriticalSpacing.m lab). To assess the reliability of each method, we tested each threshold twice in two blocks separated by a break. We find that for all methods the test&#x2013;retest correlations are highly significant (<italic>p</italic> &#x003C;&#x2009;0.01). The test&#x2013;retest standard deviation was similar across methods (<xref ref-type="fig" rid="fig1">Figure 1A</xref>) and was not different from the results for 50 observers tested by <xref ref-type="bibr" rid="ref32">Kurzawski et al. (2023)</xref> in an in-lab setting. A summary of standard deviations for our three methods and <xref ref-type="bibr" rid="ref32">Kurzawski et al. (2023)</xref> is shown in <xref ref-type="table" rid="tab1">Table 1</xref>. <xref ref-type="fig" rid="fig1">Figure 1A</xref> directly compares test and retest thresholds across methods. In each method, the retest over test ratio of crowding distance was approximately 0.8, slightly smaller than 0.9 in previous reports (<xref ref-type="bibr" rid="ref7">Chung, 2007</xref>; <xref ref-type="bibr" rid="ref32">Kurzawski et al., 2023</xref>). The improvement of the second session was independent of which method was used first to test the observer. Overall, crowding thresholds based on one 35-trial QUEST staircase have similarly good reproducibility across all three methods.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Log crowding thresholds across methods. <bold>(A)</bold> Test&#x2013;retest crowding distances across methods. Gray triangles are thresholds measured by <xref ref-type="bibr" rid="ref32">Kurzawski et al. (2023)</xref>, and colored triangles are newly acquired data. Axes are log&#x2013;log. <bold>(B)</bold> Histograms of log thresholds across methods. <italic>M</italic> is a geometric mean (dashed line) and SD is the standard deviation of all measured log crowding distances. <italic>N</italic> is the number of observations (12 observers, two meridians, test and retest) for our data and for fraction of data from Kurzawski et al. (50 observers, two meridians, test&#x2013;retest).</p>
</caption>
<graphic xlink:href="fnhum-17-1255465-g001.tif"/>
</fig>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Comparing test&#x2013;retest thresholds for all methods.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Method of measuring crowding distance test&#x2013;retest</th>
<th align="center" valign="top">Test&#x2013;retest SD</th>
<th align="center" valign="top">Pearson&#x2019;s <italic>r</italic></th>
<th align="center" valign="top">Pearson&#x2019;s <italic>p</italic></th>
<th align="center" valign="top">Ratio</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="bottom">CriticalSpacing.m lab</td>
<td align="center" valign="bottom">0.16</td>
<td align="center" valign="bottom">0.58</td>
<td align="center" valign="bottom">&#x003C;0.01</td>
<td align="center" valign="bottom">0.78</td>
</tr>
<tr>
<td align="left" valign="bottom">EasyEyes (home)</td>
<td align="center" valign="bottom">0.18</td>
<td align="center" valign="bottom">0.53</td>
<td align="center" valign="bottom">&#x003C;0.01</td>
<td align="center" valign="bottom">0.82</td>
</tr>
<tr>
<td align="left" valign="bottom">EasyEyes (lab)</td>
<td align="center" valign="bottom">0.14</td>
<td align="center" valign="bottom">0.76</td>
<td align="center" valign="bottom">&#x003C;0.01</td>
<td align="center" valign="bottom">0.79</td>
</tr>
<tr>
<td align="left" valign="bottom">
<xref ref-type="bibr" rid="ref32">Kurzawski et al. (2023)</xref></td>
<td align="center" valign="bottom">0.11</td>
<td align="center" valign="bottom">0.55</td>
<td align="center" valign="bottom">&#x003C;0.01</td>
<td align="center" valign="bottom">0.94</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>Test&#x2013;retest SD represents the mean across observers of the standard deviation of test and retest of log crowding distance.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="sec23">
<title>Crowding thresholds agree across methods</title>
<p>Crowding varies across observers and very little across sessions within an observer (<xref ref-type="bibr" rid="ref7">Chung, 2007</xref>; <xref ref-type="bibr" rid="ref32">Kurzawski et al., 2023</xref>). While it is important to assess crowding&#x2019;s reproducibility within a testing session or experiment, the core of this study is to compare crowding thresholds across three methods. Despite the variations between these methods, we find no significant differences between their measured thresholds. A one-way ANOVA shows no significant difference in mean log crowding threshold estimates, <italic>F</italic>(2)&#x2009;=&#x2009;1.19, <italic>p</italic> =&#x2009;0.308. The corresponding Bayes factor (BF01&#x2009;=&#x2009;5.4) indicates substantial evidence for the null hypothesis, which states no difference between the testing methods. Pairwise F-tests of equal variance show no significant difference in the log variance across methods: <italic>F</italic>(47, 47)&#x2009;=&#x2009;1.26, <italic>p</italic> =&#x2009;0.429 (EasyEyes lab vs. CriticalSpacing.m lab), <italic>F</italic>(47, 47)&#x2009;=&#x2009;1.49, <italic>p</italic> =&#x2009;0.173 (EasyEyes home vs. CriticalSpacing.m lab), and <italic>F</italic>(47, 47)&#x2009;=&#x2009;0.85, <italic>p</italic> =&#x2009;0.566 (EasyEyes lab vs. EasyEyes home).</p>
<p>The geometric mean (and SD log) was 3.06 (0.23) deg for EasyEyes lab, 3.00 (0.25) deg for EasyEyes home, and 3.5 (0.20) deg for CriticalSpacing.m lab (<xref ref-type="fig" rid="fig1">Figure 1B</xref>). Additionally, these estimates closely resembled the 50-observer crowding survey published by <xref ref-type="bibr" rid="ref32">Kurzawski et al. (2023)</xref>, which reported 2.47 (0.16) deg.</p>
</sec>
<sec id="sec24">
<title>Individual differences are conserved across methods</title>
<p>Here, we check whether individual differences are reproducible across the three methods. Pearson&#x2019;s correlation coefficients across methods are high, showing that these differences were conserved (<xref ref-type="fig" rid="fig2">Figure 2A</xref>). Furthermore, the test&#x2013;retest correlations within each method are not different from test&#x2013;retest across methods (<xref ref-type="fig" rid="fig2">Figure 2B</xref>). This is indicated by similar values of Pearson&#x2019;s correlation coefficients across the whole correlation matrix. To evaluate the consistency across all three methods, we calculated the intraclass correlation coefficient (ICC), which was 0.77 and indicates good reliability (<xref ref-type="bibr" rid="ref29">Koo and Li, 2016</xref>).</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Correlations of crowding distance across methods. <bold>(A)</bold> The cross-method correlations of the geometric mean crowding distance for each observer. Mean is calculated from 4 thresholds (2 meridians, test&#x2013;retest). <bold>(B)</bold> Test&#x2013;retest Pearson&#x2019;s correlation across mean crowding distance thresholds, across and within methods.</p>
</caption>
<graphic xlink:href="fnhum-17-1255465-g002.tif"/>
</fig>
</sec>
<sec id="sec25">
<title>Fixational accuracy of EasyEyes</title>
<p>Observers are asked to use the cursor to track the moving crosshair, which they do quite well (RMSE of 0.08&#x2009;deg). During tracking, the target appears at a random time (between 0.75 and 1.25&#x2009;s), so the observer cannot predict when to look toward the anticipated target location. When the timed interval ends, the crosshair disappears, the cursor is hidden, and the target appears.</p>
<p>We used gaze tracking to monitor how well this foveal tracking task achieves correct fixation during stimulus presentation. For reference, we similarly analyze the conventional awaited-fixation method (gaze-contingent stimulus presentation), which uses gaze tracking (<xref ref-type="bibr" rid="ref31">Kreyenmeier et al., 2020</xref>; <xref ref-type="bibr" rid="ref20">Hanning and Deubel, 2022</xref>; <xref ref-type="bibr" rid="ref21">Hanning et al., 2022</xref>; <xref ref-type="bibr" rid="ref32">Kurzawski et al., 2023</xref>; <xref ref-type="bibr" rid="ref33">Kwak et al., 2023</xref>).</p>
<p>While observers are tracking, their gaze remains near the crosshair (RMS of 0.6&#x2009;deg). <xref ref-type="fig" rid="fig3">Figure 3A</xref> shows X and Y screen coordinates of gaze, cursor, and crosshair position as a function of time relative to stimulus onset. <xref ref-type="fig" rid="fig3">Figure 3B</xref> traces the X and Y position of gaze, crosshair, and cursor for one trial per participant during the last 750&#x2009;ms of tracking.</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Cursor tracking task. X and Y positions of crosshair (solid black line), cursor (solid colored line), and gaze (colored points) during an EasyEyes trial. The gray bar corresponds to 1&#x2009;deg (60 pix/deg). <bold>(A)</bold> X and Y coordinates as a function of time relative to the stimulus onset of one observer. The light blue bar represents the target duration (150&#x2009;ms). <bold>(B)</bold> Shows a single representative trial from each observer (750&#x2009;ms before target onset). The black circle is the trajectory of the crosshair. Again, thick colored lines indicate cursor, and colored dots indicate gaze position. Each observer&#x2019;s data have been rotated around a circle (crosshair&#x2019;s trajectory) to minimize overlap with other observers. The pink trial in <bold>(A)</bold> corresponds to S12 plotted in <bold>(B)</bold>. All X and Y positions have been corrected for estimated calibration bias.</p>
</caption>
<graphic xlink:href="fnhum-17-1255465-g003.tif"/>
</fig>
</sec>
<sec id="sec26">
<title>Reliability of fixation with EasyEyes</title>
<p><xref ref-type="fig" rid="fig3">Figure 3B</xref> presents single-trial gaze tracking before stimulus presentation. <xref ref-type="fig" rid="fig4">Figure 4</xref> shows gaze (visual coordinates), before, during, and after the stimulus presentation. Visual coordinates are in degrees relative to the center of the crosshair. The mean gaze position (obtained every 10&#x2009;ms) across all trials (140), and all 12 observers is within 0.03&#x2009;deg of the crosshair before and during the stimulus presentation, and within 0.3&#x2009;deg after the stimulus presentation. The standard deviation is the lowest in the pre-stimulus interval, while observers are tracking (SD X&#x2009;=&#x2009;0.97&#x2009;deg, SD Y&#x2009;=&#x2009;0.48&#x2009;deg), increases during stimulus presentation (SD X&#x2009;=&#x2009;2.03&#x2009;deg, SD Y&#x2009;=&#x2009;0.9&#x2009;deg) and is highest after stimulus offset (SD X&#x2009;=&#x2009;6.36&#x2009;deg, SD Y&#x2009;=&#x2009;1.67&#x2009;deg). The higher standard deviation in horizontal direction before and during stimulus presentation reflects the overall tendency of eye movements to be directed horizontally (<xref ref-type="bibr" rid="ref11">Engbert, 2006</xref>; <xref ref-type="bibr" rid="ref45">Najemnik and Geisler, 2008</xref>; <xref ref-type="bibr" rid="ref47">Otero-Millan et al., 2013</xref>; <xref ref-type="bibr" rid="ref56">Rolfs and Schweitzer, 2022</xref>). The pronounced variance in horizontal gaze position after stimulus offset indicates (saccadic) eye movements toward the (now absent) target&#x2014;a phenomenon commonly referred to as &#x201C;looking at nothing&#x201D; (<xref ref-type="bibr" rid="ref13">Ferreira et al., 2008</xref>). Note that this gaze variance does not affect our perceptual measurement as the target was already undrawn. For gaze deviations during stimulus presentation, see <xref ref-type="table" rid="tab2">Table 2</xref>.</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>X and Y coordinates (in deg) of gaze before, during, and after target presentation across participants. <bold>(A)</bold> Shows 2D histograms of gaze. The circle indicates the eye tracker precision and the red cross is the target location. Total counts differ due to variations in the amount of data presented (500&#x2009;ms vs. 150&#x2009;ms) and the differences in the count of the bin with max counts. The 500&#x2009;ms period is entirely within the 700&#x2009;ms between target offset and response instructions. <bold>(B)</bold> Shows gaze position in X and Y coordinates. The red vertical line indicates the target location.</p>
</caption>
<graphic xlink:href="fnhum-17-1255465-g004.tif"/>
</fig>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption>
<p>Standard deviations (in deg) of gaze position in X and Y coordinates during stimulus presentation for each participant using EasyEyes lab.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Observer</th>
<th align="center" valign="top">SD X (deg)</th>
<th align="center" valign="top">SD Y (deg)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">S1</td>
<td align="center" valign="bottom">0.58</td>
<td align="center" valign="bottom">1.43</td>
</tr>
<tr>
<td align="left" valign="top">S2</td>
<td align="center" valign="bottom">0.45</td>
<td align="center" valign="bottom">0.37</td>
</tr>
<tr>
<td align="left" valign="top">S3</td>
<td align="center" valign="bottom">1.84</td>
<td align="center" valign="bottom">1.38</td>
</tr>
<tr>
<td align="left" valign="top">S4</td>
<td align="center" valign="bottom">1.02</td>
<td align="center" valign="bottom">0.93</td>
</tr>
<tr>
<td align="left" valign="top">S5</td>
<td align="center" valign="bottom">1.51</td>
<td align="center" valign="bottom">0.43</td>
</tr>
<tr>
<td align="left" valign="top">S6</td>
<td align="center" valign="bottom">0.31</td>
<td align="center" valign="bottom">0.16</td>
</tr>
<tr>
<td align="left" valign="top">S7</td>
<td align="center" valign="bottom">0.62</td>
<td align="center" valign="bottom">0.48</td>
</tr>
<tr>
<td align="left" valign="top">S8</td>
<td align="center" valign="bottom">0.45</td>
<td align="center" valign="bottom">0.52</td>
</tr>
<tr>
<td align="left" valign="top">S9</td>
<td align="center" valign="bottom">6.54</td>
<td align="center" valign="bottom">1.92</td>
</tr>
<tr>
<td align="left" valign="top">S10</td>
<td align="center" valign="bottom">0.53</td>
<td align="center" valign="bottom">0.34</td>
</tr>
<tr>
<td align="left" valign="top">S11</td>
<td align="center" valign="bottom">0.68</td>
<td align="center" valign="bottom">0.50</td>
</tr>
<tr>
<td align="left" valign="top">S12</td>
<td align="center" valign="bottom">0.37</td>
<td align="center" valign="bottom">0.25</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>One out of 12 observers (S9) has a much higher standard deviation (<xref ref-type="table" rid="tab2">Table 2</xref>) and often peeked at approximately 120&#x2009;ms after stimulus onset. The short latency indicates that this participant had planned the eye movement in anticipation of target onset. The same observer also peeked during many CriticalSpacing.m lab trials where they were detected and rejected by gaze tracking (<xref ref-type="fig" rid="fig5">Figure 5</xref>). The 11 remaining observers showed negligible peeking.</p>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption>
<p>Comparing peeking across methods. The plot shows the percentage of trials in which observers peeked, that is, their gaze position was more than 1.5&#x2009;deg away from the crosshair during stimulus presentation. For CriticalSpacing.m lab, peeks are detected by the eye tracker and correspond to rejected trials. For EasyEyes lab, we use gaze data to calculate the percentage of peeks post-hoc.</p>
</caption>
<graphic xlink:href="fnhum-17-1255465-g005.tif"/>
</fig>
</sec>
<sec id="sec27">
<title>Less peeking with moving crosshair task than with static fixation</title>
<p>CriticalSpacing.m lab counts the peeks and repeats the trials in which observers peeked. While using EasyEyes lab, we monitored gaze position to count each observer&#x2019;s peeks. In the last section, we showed that observers fixate near the crosshair while tracking it with a cursor. Here, we wondered whether the tracking also reduces the urge to peek. We compared how often observers peek using a stationary crosshair method in CriticalSpacing.m lab versus the moving crosshair tracking task with EasyEyes lab. A peek is deemed to occur when the observer&#x2019;s gaze deviates more than 1.5&#x2009;deg from the last crosshair center position during stimulus presentation. The tracking task roughly halved the number of peeks across observers (median decreased from 9.3% to 5.4%). Individual data are shown in <xref ref-type="fig" rid="fig5">Figure 5</xref>. Even with a rather short stimulus duration (150&#x2009;ms), one observer managed to peek with both methods. However, their peeking did not change their mean crowding distance relative to other observers. Furthermore, their thresholds were consistent across methods even though CriticalSpacing.m lab rejects peeking trials and EasyEyes cannot.</p>
</sec>
</sec>
<sec sec-type="discussion" id="sec28">
<title>Discussion</title>
<p>EasyEyes offers a new task to achieve accurate fixation online. We evaluated the accuracy of fixation and compared crowding thresholds measured online with EasyEyes or with our in-lab method (<xref ref-type="bibr" rid="ref32">Kurzawski et al., 2023</xref>).</p>
<p>We tested 12 naive observers using traditional fixation and gaze-contingent stimulus display (CriticalSpacing.m lab), and using EasyEyes online at home (EasyEyes home) as well as in the laboratory while independently monitoring gaze (EasyEyes lab). Comparing the mean and standard deviation in thresholds across observers, we do not find significant differences across methods. Cross-method and within-method correlations are not different and individual differences are conserved. With a gaze tracker, we validate that EasyEyes achieves accurate fixation during target presentation.</p>
<sec id="sec29">
<title>Importance of accurate fixation</title>
<p>Visual sensitivity decays with increasing distance from the fovea (the center of gaze). The density of photoreceptor and midget retinal ganglion cells declines with retinal eccentricity, increasing receptive field size (<xref ref-type="bibr" rid="ref14">Freeman and Simoncelli, 2011</xref>; <xref ref-type="bibr" rid="ref1">Anton-Erxleben and Carrasco, 2013</xref>). Thus, the visual system loses sensitivity (e.g., to higher spatial frequencies, contrast, or orientation changes) in the periphery. As a consequence, performance in nearly all visual tasks scales with the retinal eccentricity of the test stimulus [but see <xref ref-type="bibr" rid="ref20">Hanning and Deubel (2022)</xref> for an eccentricity-independent approach]. Visual crowding is no exception: crowding distance scales linearly with eccentricity (<xref ref-type="bibr" rid="ref3">Bouma, 1970</xref>). In order to achieve a stable threshold estimate, precise control of fixation is indispensable to ensure consistent measurement at the desired retinal eccentricity.</p>
</sec>
<sec id="sec30">
<title>Gaze during cursor tracking</title>
<p>Previous research has shown that successful tracking of a moving object with a hand-controlled cursor requires that gaze should closely follow the moving object (<xref ref-type="bibr" rid="ref68">Xia and Barnes, 1999</xref>; <xref ref-type="bibr" rid="ref46">Niehorster et al., 2015</xref>; <xref ref-type="bibr" rid="ref8">Danion and Flanagan, 2018</xref>). Based on this, we asked observers to use the cursor to track the moving crosshair with the goal of keeping their gaze near the crosshair. Indeed, all of our observers use the cursor reliably to track the crosshair, keeping their gaze near both. This ensures the desired retinal target eccentricity. Both gaze and hand tend to lag the target (<xref ref-type="bibr" rid="ref28">Koken and Erkelens, 1992</xref>).</p>
</sec>
<sec id="sec31">
<title>Classifying peekers</title>
<p>Researchers with an eye tracker can filter &#x201C;peeking&#x201D; behavior by making stimulus presentation contingent on fixation or using gaze position to remove trials where peeking occurred post-hoc. Our novel method of ensuring fixation gets around the need for eye tracking, so new methods are needed to filter out &#x201C;peekers.&#x201D; Within our sample (<italic>N</italic> =&#x2009;12), one observer (S9) peeked. S9 had the highest RMSE between the crosshair and cursor and the most frames with unsuccessful tracking, suggesting that peeking and tracking behavior are associated. One may use an RMSE criterion to predict peeking from tracking behavior (we thank Reviewer 2 for this suggestion), but more data would be required to establish this as a valid approach for classifying participants as peekers and non-peekers. In our data, post-hoc omitting the 20% of observers with the highest crosshair-cursor RMSE effectively eliminates peekers. However, toward the goal of assessing crowding as a biomarker, one must consider both subpopulations and analyze them separately.</p>
<p>As peeking reduces crowding distance (<xref ref-type="bibr" rid="ref32">Kurzawski et al., 2023</xref>), S9 had a lower crowding distance when peeking trials were not excluded (Compare EasyEyes home and laboratory with CriticalSpacing.m in <xref ref-type="fig" rid="fig2">Figure 2</xref>). This highlights the need to classify peekers and non-peekers.</p>
</sec>
<sec id="sec32">
<title>Comparing to previous work</title>
<p>Our study is very similar to <xref ref-type="bibr" rid="ref32">Kurzawski et al. (2023)</xref>&#x2014;both used CriticalSpacing.m without crosshair tracking, so differences in results cannot be attributed to differences in task load. There were several minor differences in methods: The 50 participants in Kurzawski et al. were psychology graduate students experienced in peripheral testing, while here we recruited 12 adults in the university area with no prior experience in psychophysical testing. Here, the geometric mean crowding threshold and the test&#x2013;retest standard deviation are slightly higher than reported by Kurzawski et al. (geometric mean of 3.5 vs. 2.5&#x2009;deg; test&#x2013;retest standard deviation of 0.16 vs. 0.11). The more experienced observers had log thresholds with lower mean and standard deviation. A closer look reveals that the test&#x2013;retest ratio is lower for naive participants (<xref ref-type="table" rid="tab1">Table 1</xref>), indicating that they improved more from the first to the second threshold measurement. This is consistent with previous accounts of the effect of practice on lowering crowding distance (<xref ref-type="bibr" rid="ref7">Chung, 2007</xref>). Despite these differences between the current study and Kurzawski et al., the consistency across results between methods is high, and individual differences in crowding distance are conserved.</p>
</sec>
<sec id="sec33">
<title>Why measure crowding (online)?</title>
<p>Both crowding distance and acuity are roughly proportional to eccentricity (<xref ref-type="bibr" rid="ref3">Bouma, 1970</xref>) and thus are similarly sensitive to errors in fixation. We are not aware of any test that is more sensitive to eccentricity. Ophthalmology and optometry clinics routinely measure acuity. Here, we explore the possibility that they might find it worthwhile to also measure crowding. Foveal acuity determines the smallest text size that can be read at a certain eccentricity, and peripheral crowding puts an upper limit on reading speed (<xref ref-type="bibr" rid="ref53">Pelli et al., 2007</xref>). <xref ref-type="bibr" rid="ref32">Kurzawski et al. (2023)</xref> found hardly any correlation (<italic>r</italic> =&#x2009;0.15) between foveal and peripheral crowding. Because of its sensitivity to eccentricity and its potential clinical utility, peripheral crowding is a suitable measurement to validate EasyEyes.</p>
<p>From a scientific point of view, accurate fixation for online vision testing enabled by EasyEyes will help to scale up our study of crowding as a promising biomarker of the development and health of the visual cortex. Crowding is correlated with dyslexia (<xref ref-type="bibr" rid="ref34">Kwon et al., 2007</xref>) and can be measured years before the child learns to read (<xref ref-type="bibr" rid="ref52">Pelli and Tillman, 2008</xref>). Besides this, online testing will facilitate cross-sectional and longitudinal surveys of crowding and related measures. Based on its correlation with dyslexia, we also anticipate a correlation between crowding and reading speed, and that pre-literate crowding might predict later reading speed.</p>
</sec>
<sec id="sec34">
<title>Quality of online data</title>
<p>There have been many evaluations of online testing data quality. Some of these reports find comparable data quality between online and in-lab studies (e.g., <xref ref-type="bibr" rid="ref16">Goodman et al., 2013</xref>; <xref ref-type="bibr" rid="ref18">Gureckis et al., 2016</xref>). Others identify serious problems with online data (e.g., <xref ref-type="bibr" rid="ref43">McGonagle, 2015</xref>; <xref ref-type="bibr" rid="ref60">Smith et al., 2016</xref>). To deal with this, it is often recommended to include tests to screen out observers who are not fully engaged. Fortunately, threshold tasks are good at screening out non-attentive participants, as they yield very high thresholds, unless an observer is attending reliably (<xref ref-type="bibr" rid="ref15">Freeman et al., 2013</xref>; <xref ref-type="bibr" rid="ref41">Majaj et al., 2015</xref>). Answering an easy trial incorrectly tends to produce a high threshold estimate that stands out as an outlier. Furthermore, the crosshair tracking task requires successful tracking for target presentation, which demands full attention.</p>
</sec>
<sec id="sec35">
<title>Why test online?</title>
<p>Online testing allows researchers to test hundreds or thousands of participants in a day, recruit diverse and special populations, and screen underserved populations. As online vision testing gains popularity, a new generation of testing software [e.g., jsPsych (<xref ref-type="bibr" rid="ref9">de Leeuw, 2015</xref>), lab.js (<xref ref-type="bibr" rid="ref22">Henninger et al., 2019</xref>), PsychoJS (<xref ref-type="bibr" rid="ref55">Pitiot et al., 2017</xref>), Gorilla (<xref ref-type="bibr" rid="ref2">Anwyl-Irvine et al., 2020</xref>), and OpenSesame (<xref ref-type="bibr" rid="ref42">Math&#x00F4;t et al., 2012</xref>)] makes it easier to test online than in the laboratory. However, established software lacks the possibility of using gaze tracking to achieve precise fixation&#x2014;a requirement for most vision tests. Using the cursor to track a moving crosshair, EasyEyes delivers precise fixation&#x2014;and the same thresholds online as we previously have measured in the lab. Our study shows that EasyEyes is a promising tool for lab-quality online vision testing. Despite other differences, such as the absence of supervision, diversity of equipment, and domestic distractions, EasyEyes achieves precise peripheral, fixation-dependent measurements that so far could only be obtained in the laboratory.</p>
</sec>
</sec>
<sec sec-type="conclusions" id="sec36">
<title>Conclusion</title>
<p>Cursor tracking of a moving crosshair yields accurate fixation (RMSE of 0.6 deg). This method results in crowding thresholds equivalent to those measured in the lab with EyeLink 1000 gaze tracking. This trick facilitates online testing of any fixation-dependent measures. EasyEyes enables fixation-dependent measurements online, for easy testing of larger and more diverse populations.</p>
</sec>
<sec sec-type="data-availability" id="sec37">
<title>Data availability statement</title>
<p>The datasets presented in this study can be found in online repositories. The names of the repository/repositories and accession number(s) can be found at: <ext-link xlink:href="https://osf.io/u6gdj/" ext-link-type="uri">https://osf.io/u6gdj/</ext-link>.</p>
</sec>
<sec sec-type="ethics-statement" id="sec38">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the Committee on Activities Involving Human Subjects. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec sec-type="author-contributions" id="sec39">
<title>Author contributions</title>
<p>JK: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Project administration, Resources, Software, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. MP: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Project administration, Resources, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. AB: Resources, Software, Writing &#x2013; review &#x0026; editing. NH: Software, Writing &#x2013; review &#x0026; editing, Resources. SL: Software, Writing &#x2013; review &#x0026; editing. NM: Conceptualization, Funding acquisition, Supervision, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. DP: Conceptualization, Funding acquisition, Project administration, Resources, Software, Supervision, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing.</p>
</sec>
</body>
<back>
<sec sec-type="funding-information" id="sec40">
<title>Funding</title>
<p>This research was funded by NIH grants R01-EY027964 to DP and R01-EY031446 to NM. It was also supported by an NIH core vision grant P30-EY013079 and by a Marie Sk&#x142;odowska-Curie individual fellowship by the European Commission (898520) to NH. The EasyEyes software was developed in part with funds provided by Meta through a sponsored research agreement.</p>
</sec>
<sec sec-type="COI-statement" id="sec41">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="sec100" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="sec42">
<title>Supplementary material</title>
<p>The Supplementary material for this article can be found online at: <ext-link xlink:href="https://www.frontiersin.org/articles/10.3389/fnhum.2023.1255465/full#supplementary-material" ext-link-type="uri">https://www.frontiersin.org/articles/10.3389/fnhum.2023.1255465/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.docx" id="SM1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<fn-group>
<fn id="fn0001">
<p><sup>1</sup><ext-link xlink:href="https://easyeyes.app/" ext-link-type="uri">https://easyeyes.app/</ext-link></p>
</fn>
<fn id="fn0002">
<p><sup>2</sup><ext-link xlink:href="https://pavlovia.org/" ext-link-type="uri">https://pavlovia.org/</ext-link></p>
</fn>
<fn id="fn0003">
<p><sup>3</sup><ext-link xlink:href="https://www.prolific.co/" ext-link-type="uri">https://www.prolific.co/</ext-link></p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="ref1"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Anton-Erxleben</surname> <given-names>K.</given-names></name> <name><surname>Carrasco</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Attentional enhancement of spatial resolution: linking behavioural and neurophysiological evidence</article-title>. <source>Nat. Rev. Neurosci.</source> <volume>14</volume>, <fpage>188</fpage>&#x2013;<lpage>200</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nrn3443</pub-id>, PMID: <pub-id pub-id-type="pmid">23422910</pub-id></citation></ref>
<ref id="ref2"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Anwyl-Irvine</surname> <given-names>A. L.</given-names></name> <name><surname>Massonni&#x00E9;</surname> <given-names>J.</given-names></name> <name><surname>Flitton</surname> <given-names>A.</given-names></name> <name><surname>Kirkham</surname> <given-names>N.</given-names></name> <name><surname>Evershed</surname> <given-names>J. K.</given-names></name></person-group> (<year>2020</year>). <article-title>Gorilla in our midst: an online behavioral experiment builder</article-title>. <source>Behav. Res. Methods</source> <volume>52</volume>, <fpage>388</fpage>&#x2013;<lpage>407</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13428-019-01237-x</pub-id>, PMID: <pub-id pub-id-type="pmid">31016684</pub-id></citation></ref>
<ref id="ref3"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bouma</surname> <given-names>H.</given-names></name></person-group> (<year>1970</year>). <article-title>Interaction effects in parafoveal letter recognition</article-title>. <source>Nature</source> <volume>226</volume>:<issue>5241</issue>, <fpage>177</fpage>&#x2013;<lpage>178</lpage>. doi: <pub-id pub-id-type="doi">10.1038/226177a0</pub-id>, PMID: <pub-id pub-id-type="pmid">5437004</pub-id></citation></ref>
<ref id="ref4"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bouma</surname> <given-names>H.</given-names></name></person-group> (<year>1973</year>). <article-title>Visual interference in the parafoveal recognition of initial and final letters of words</article-title>. <source>Vis. Res.</source> <volume>13</volume>, <fpage>767</fpage>&#x2013;<lpage>782</lpage>. doi: <pub-id pub-id-type="doi">10.1016/0042-6989(73)90041-2</pub-id>, PMID: <pub-id pub-id-type="pmid">4706350</pub-id></citation></ref>
<ref id="ref5"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brainard</surname> <given-names>D. H.</given-names></name></person-group> (<year>1997</year>). <article-title>The psychophysics toolbox</article-title>. <source>Spat. Vis.</source> <volume>10</volume>, <fpage>433</fpage>&#x2013;<lpage>436</lpage>. doi: <pub-id pub-id-type="doi">10.1163/156856897X00357</pub-id></citation></ref>
<ref id="ref6"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>M. C.</given-names></name> <name><surname>Anderson</surname> <given-names>J. R.</given-names></name> <name><surname>Sohn</surname> <given-names>M. H.</given-names></name></person-group> (<year>2001</year>). What can a mouse cursor tell us more? Correlation of eye/mouse movements on web browsing. <italic>CHI &#x2018;01 Extended Abstracts on Human Factors in Computing Systems</italic>, 281&#x2013;282. Available at: <ext-link xlink:href="https://doi.org/10.1145/634067.634234" ext-link-type="uri">https://doi.org/10.1145/634067.634234</ext-link></citation></ref>
<ref id="ref7"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chung</surname> <given-names>S. T. L.</given-names></name></person-group> (<year>2007</year>). <article-title>Learning to identify crowded letters: does it improve reading speed?</article-title> <source>Vis. Res.</source> <volume>47</volume>, <fpage>3150</fpage>&#x2013;<lpage>3159</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.visres.2007.08.017</pub-id>, PMID: <pub-id pub-id-type="pmid">17928026</pub-id></citation></ref>
<ref id="ref8"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Danion</surname> <given-names>F. R.</given-names></name> <name><surname>Flanagan</surname> <given-names>J. R.</given-names></name></person-group> (<year>2018</year>). <article-title>Different gaze strategies during eye versus hand tracking of a moving target</article-title>. <source>Sci. Rep.</source> <volume>8</volume>:<fpage>10059</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-018-28434-6</pub-id>, PMID: <pub-id pub-id-type="pmid">29968806</pub-id></citation></ref>
<ref id="ref9"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>de Leeuw</surname> <given-names>J. R.</given-names></name></person-group> (<year>2015</year>). <article-title>jsPsych: a JavaScript library for creating behavioral experiments in a web browser</article-title>. <source>Behav. Res. Methods</source> <volume>47</volume>, <fpage>1</fpage>&#x2013;<lpage>12</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13428-014-0458-y</pub-id>, PMID: <pub-id pub-id-type="pmid">24683129</pub-id></citation></ref>
<ref id="ref10"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Elliott</surname> <given-names>D. B.</given-names></name> <name><surname>Whitaker</surname> <given-names>D.</given-names></name> <name><surname>Bonette</surname> <given-names>L.</given-names></name></person-group> (<year>1990</year>). <article-title>Differences in the legibility of letters at contrast threshold using the Pelli-Robson chart</article-title>. <source>Ophthalmic Physiol. Opt.</source> <volume>10</volume>, <fpage>323</fpage>&#x2013;<lpage>326</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1475-1313.1990.tb00877.x</pub-id>, PMID: <pub-id pub-id-type="pmid">2263364</pub-id></citation></ref>
<ref id="ref11"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Engbert</surname> <given-names>R.</given-names></name></person-group> (<year>2006</year>). <article-title>Microsaccades: a microcosm for research on oculomotor control, attention, and visual perception</article-title>. <source>Prog. Brain Res.</source> <volume>154</volume>, <fpage>177</fpage>&#x2013;<lpage>192</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0079-6123(06)54009-9</pub-id></citation></ref>
<ref id="ref12"><citation citation-type="other"><person-group person-group-type="author"><collab id="coll1">Eye Tracking 101</collab></person-group>. (<year>2022</year>). <italic>What is it &#x0026; how does it work in real life? Eyeware,</italic> March 3. Available at: <ext-link xlink:href="https://eyeware.tech/blog/what-is-eye-tracking/" ext-link-type="uri">https://eyeware.tech/blog/what-is-eye-tracking/</ext-link></citation></ref>
<ref id="ref13"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ferreira</surname> <given-names>F.</given-names></name> <name><surname>Apel</surname> <given-names>J.</given-names></name> <name><surname>Henderson</surname> <given-names>J. M.</given-names></name></person-group> (<year>2008</year>). <article-title>Taking a new look at looking at nothing</article-title>. <source>Trends Cogn. Sci.</source> <volume>12</volume>, <fpage>405</fpage>&#x2013;<lpage>410</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.tics.2008.07.007</pub-id>, PMID: <pub-id pub-id-type="pmid">18805041</pub-id></citation></ref>
<ref id="ref14"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Freeman</surname> <given-names>J.</given-names></name> <name><surname>Simoncelli</surname> <given-names>E. P.</given-names></name></person-group> (<year>2011</year>). <article-title>Metamers of the ventral stream</article-title>. <source>Nat. Neurosci.</source> <volume>14</volume>, <fpage>1195</fpage>&#x2013;<lpage>1201</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nn.2889</pub-id>, PMID: <pub-id pub-id-type="pmid">21841776</pub-id></citation></ref>
<ref id="ref15"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Freeman</surname> <given-names>J.</given-names></name> <name><surname>Ziemba</surname> <given-names>C. M.</given-names></name> <name><surname>Heeger</surname> <given-names>D. J.</given-names></name> <name><surname>Simoncelli</surname> <given-names>E. P.</given-names></name> <name><surname>Movshon</surname> <given-names>J. A.</given-names></name></person-group> (<year>2013</year>). <article-title>A functional and perceptual signature of the second visual area in primates</article-title>. <source>Nat. Neurosci.</source> <volume>16</volume>, <fpage>974</fpage>&#x2013;<lpage>981</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nn.3402</pub-id>, PMID: <pub-id pub-id-type="pmid">23685719</pub-id></citation></ref>
<ref id="ref16"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Goodman</surname> <given-names>J. K.</given-names></name> <name><surname>Cryder</surname> <given-names>C. E.</given-names></name> <name><surname>Cheema</surname> <given-names>A.</given-names></name></person-group> (<year>2013</year>). <article-title>Data collection in a flat world: the strengths and weaknesses of mechanical Turk samples</article-title>. <source>J. Behav. Decis. Mak.</source> <volume>26</volume>, <fpage>213</fpage>&#x2013;<lpage>224</lpage>. doi: <pub-id pub-id-type="doi">10.1002/bdm.1753</pub-id></citation></ref>
<ref id="ref17"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Grootswagers</surname> <given-names>T.</given-names></name></person-group> (<year>2020</year>). <article-title>A primer on running human behavioural experiments online</article-title>. <source>Behav. Res. Methods</source> <volume>52</volume>, <fpage>2283</fpage>&#x2013;<lpage>2286</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13428-020-01395-3</pub-id>, PMID: <pub-id pub-id-type="pmid">32291730</pub-id></citation></ref>
<ref id="ref18"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gureckis</surname> <given-names>T. M.</given-names></name> <name><surname>Martin</surname> <given-names>J.</given-names></name> <name><surname>McDonnell</surname> <given-names>J.</given-names></name> <name><surname>Rich</surname> <given-names>A. S.</given-names></name> <name><surname>Markant</surname> <given-names>D.</given-names></name> <name><surname>Coenen</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>psiTurk: an open-source framework for conducting replicable behavioral experiments online</article-title>. <source>Behav. Res. Methods</source> <volume>48</volume>, <fpage>829</fpage>&#x2013;<lpage>842</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13428-015-0642-8</pub-id></citation></ref>
<ref id="ref19"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Guzman-Martinez</surname> <given-names>E.</given-names></name> <name><surname>Leung</surname> <given-names>P.</given-names></name> <name><surname>Franconeri</surname> <given-names>S.</given-names></name> <name><surname>Grabowecky</surname> <given-names>M.</given-names></name> <name><surname>Suzuki</surname> <given-names>S.</given-names></name></person-group> (<year>2009</year>). <article-title>Rapid eye-fixation training without eye tracking</article-title>. <source>Psychon. Bull. Rev.</source> <volume>16</volume>, <fpage>491</fpage>&#x2013;<lpage>496</lpage>. doi: <pub-id pub-id-type="doi">10.3758/PBR.16.3.491</pub-id>, PMID: <pub-id pub-id-type="pmid">19451374</pub-id></citation></ref>
<ref id="ref20"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hanning</surname> <given-names>N. M.</given-names></name> <name><surname>Deubel</surname> <given-names>H.</given-names></name></person-group> (<year>2022</year>). <article-title>A dynamic 1/f noise protocol to assess visual attention without biasing perceptual processing</article-title>. <source>Behav. Res. Methods</source> <volume>55</volume>, <fpage>2583</fpage>&#x2013;<lpage>2594</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13428-022-01916-2</pub-id>, PMID: <pub-id pub-id-type="pmid">35915360</pub-id></citation></ref>
<ref id="ref21"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hanning</surname> <given-names>N. M.</given-names></name> <name><surname>Himmelberg</surname> <given-names>M. M.</given-names></name> <name><surname>Carrasco</surname> <given-names>M.</given-names></name></person-group> (<year>2022</year>). <article-title>Presaccadic attention enhances contrast sensitivity, but not at the upper vertical meridian</article-title>. <source>iScience</source> <volume>25</volume>:<fpage>103851</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.isci.2022.103851</pub-id>, PMID: <pub-id pub-id-type="pmid">35198902</pub-id></citation></ref>
<ref id="ref22"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Henninger</surname> <given-names>F.</given-names></name> <name><surname>Shevchenko</surname> <given-names>Y.</given-names></name> <name><surname>Mertens</surname> <given-names>U.</given-names></name> <name><surname>Kieslich</surname> <given-names>P. J.</given-names></name> <name><surname>Hilbig</surname> <given-names>B. E.</given-names></name></person-group> (<year>2019</year>). <italic>Lab.Js: a free, open, online experiment builder</italic> [computer software]. Zenodo. Available at: <ext-link xlink:href="https://doi.org/10.5281/zenodo.2775942" ext-link-type="uri">https://doi.org/10.5281/zenodo.2775942</ext-link></citation></ref>
<ref id="ref23"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Himmelberg</surname> <given-names>M. M.</given-names></name> <name><surname>Winawer</surname> <given-names>J.</given-names></name> <name><surname>Carrasco</surname> <given-names>M.</given-names></name></person-group> (<year>2023</year>). <article-title>Polar angle asymmetries in visual perception and neural architecture</article-title>. <source>Trends Neurosci.</source> <volume>46</volume>, <fpage>445</fpage>&#x2013;<lpage>458</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.tins.2023.03.006</pub-id>, PMID: <pub-id pub-id-type="pmid">37031051</pub-id></citation></ref>
<ref id="ref24"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Huang</surname> <given-names>M. X.</given-names></name> <name><surname>Kwok</surname> <given-names>T. C. K.</given-names></name> <name><surname>Ngai</surname> <given-names>G.</given-names></name> <name><surname>Chan</surname> <given-names>S. C. F.</given-names></name> <name><surname>Leong</surname> <given-names>H. V.</given-names></name></person-group> (<year>2016</year>). Building a personalized, auto-calibrating eye tracker from user interactions. <italic>Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems</italic>, 5169&#x2013;5179.</citation></ref>
<ref id="ref25"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jana</surname> <given-names>S.</given-names></name> <name><surname>Gopal</surname> <given-names>A.</given-names></name> <name><surname>Murthy</surname> <given-names>A.</given-names></name></person-group> (<year>2017</year>). <article-title>A computational framework for understanding eye&#x2013;hand coordination</article-title>. <source>J. Indian Inst. Sci.</source> <volume>97</volume>, <fpage>543</fpage>&#x2013;<lpage>554</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s41745-017-0054-0</pub-id></citation></ref>
<ref id="ref26"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Joo</surname> <given-names>S. J.</given-names></name> <name><surname>White</surname> <given-names>A. L.</given-names></name> <name><surname>Strodtman</surname> <given-names>D. J.</given-names></name> <name><surname>Yeatman</surname> <given-names>J. D.</given-names></name></person-group> (<year>2018</year>). <article-title>Optimizing text for an individual&#x2019;s visual system: the contribution of visual crowding to reading difficulties</article-title>. <source>Cortex</source> <volume>103</volume>, <fpage>291</fpage>&#x2013;<lpage>301</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cortex.2018.03.013</pub-id>, PMID: <pub-id pub-id-type="pmid">29679920</pub-id></citation></ref>
<ref id="ref27"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kartynnik</surname> <given-names>Y.</given-names></name> <name><surname>Ablavatski</surname> <given-names>A.</given-names></name> <name><surname>Grishchenko</surname> <given-names>I.</given-names></name> <name><surname>Grundmann</surname> <given-names>M.</given-names></name></person-group> (<year>2019</year>). <article-title>Real-time facial surface geometry from monocular video on Mobile GPUs</article-title>. <source>arXiv</source>. doi: <pub-id pub-id-type="doi">10.48550/arXiv.1907.06724</pub-id></citation></ref>
<ref id="ref28"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Koken</surname> <given-names>P. W.</given-names></name> <name><surname>Erkelens</surname> <given-names>C. J.</given-names></name></person-group> (<year>1992</year>). <article-title>Influences of hand movements on eye movements in tracking tasks in man</article-title>. <source>Exp. Brain Res.</source> <volume>88</volume>, <fpage>657</fpage>&#x2013;<lpage>664</lpage>. doi: <pub-id pub-id-type="doi">10.1007/BF00228195</pub-id>, PMID: <pub-id pub-id-type="pmid">1587324</pub-id></citation></ref>
<ref id="ref29"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Koo</surname> <given-names>T. K.</given-names></name> <name><surname>Li</surname> <given-names>M. Y.</given-names></name></person-group> (<year>2016</year>). <article-title>A guideline of selecting and reporting intraclass correlation coefficients for reliability research</article-title>. <source>J. Chiropr. Med.</source> <volume>15</volume>, <fpage>155</fpage>&#x2013;<lpage>163</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jcm.2016.02.012</pub-id>, PMID: <pub-id pub-id-type="pmid">27330520</pub-id></citation></ref>
<ref id="ref30"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kooi</surname> <given-names>F.</given-names></name> <name><surname>Toet</surname> <given-names>A.</given-names></name> <name><surname>Tripathy</surname> <given-names>S.</given-names></name> <name><surname>Levi</surname> <given-names>D.</given-names></name></person-group> (<year>1994</year>). <article-title>The effect of similarity and duration on spatial interaction in peripheral vision</article-title>. <source>Spat. Vis.</source> <volume>8</volume>, <fpage>255</fpage>&#x2013;<lpage>279</lpage>. doi: <pub-id pub-id-type="doi">10.1163/156856894X00350</pub-id>, PMID: <pub-id pub-id-type="pmid">7993878</pub-id></citation></ref>
<ref id="ref31"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kreyenmeier</surname> <given-names>P.</given-names></name> <name><surname>Deubel</surname> <given-names>H.</given-names></name> <name><surname>Hanning</surname> <given-names>N. M.</given-names></name></person-group> (<year>2020</year>). <article-title>Theory of visual attention (TVA) in action: assessing premotor attention in simultaneous eye-hand movements</article-title>. <source>Cortex</source> <volume>133</volume>, <fpage>133</fpage>&#x2013;<lpage>148</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cortex.2020.09.020</pub-id></citation></ref>
<ref id="ref32"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kurzawski</surname> <given-names>J. W.</given-names></name> <name><surname>Burchell</surname> <given-names>A.</given-names></name> <name><surname>Thapa</surname> <given-names>D.</given-names></name> <name><surname>Winawer</surname> <given-names>J.</given-names></name> <name><surname>Majaj</surname> <given-names>N. J.</given-names></name> <name><surname>Pelli</surname> <given-names>D. G.</given-names></name></person-group> (<year>2023</year>). <article-title>The Bouma law accounts for crowding in fifty observers</article-title>. <source>bioRxiv</source>. <volume>6</volume>, <fpage>1</fpage>&#x2013;<lpage>34</lpage>. doi: <pub-id pub-id-type="doi">10.1101/2021.04.12.439570</pub-id></citation></ref>
<ref id="ref33"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kwak</surname> <given-names>Y.</given-names></name> <name><surname>Hanning</surname> <given-names>N. M.</given-names></name> <name><surname>Carrasco</surname> <given-names>M.</given-names></name></person-group> (<year>2023</year>). <article-title>Presaccadic attention sharpens visual acuity</article-title>. <source>Sci. Rep.</source> <volume>13</volume>:<fpage>2981</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-023-29990-2</pub-id>, PMID: <pub-id pub-id-type="pmid">36807313</pub-id></citation></ref>
<ref id="ref34"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kwon</surname> <given-names>M.</given-names></name> <name><surname>Legge</surname> <given-names>G. E.</given-names></name> <name><surname>Dubbels</surname> <given-names>B. R.</given-names></name></person-group> (<year>2007</year>). <article-title>Developmental changes in the visual span for reading</article-title>. <source>Vis. Res.</source> <volume>47</volume>, <fpage>2889</fpage>&#x2013;<lpage>2900</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.visres.2007.08.002</pub-id>, PMID: <pub-id pub-id-type="pmid">17845810</pub-id></citation></ref>
<ref id="ref35"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Land</surname> <given-names>M. F.</given-names></name> <name><surname>Hayhoe</surname> <given-names>M.</given-names></name></person-group> (<year>2001</year>). <article-title>In what ways do eye movements contribute to everyday activities?</article-title> <source>Vis. Res.</source> <volume>41</volume>, <fpage>3559</fpage>&#x2013;<lpage>3565</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0042-6989(01)00102-X</pub-id>, PMID: <pub-id pub-id-type="pmid">11718795</pub-id></citation></ref>
<ref id="ref36"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Levi</surname> <given-names>D. M.</given-names></name></person-group> (<year>2008</year>). <article-title>Crowding&#x2014;an essential bottleneck for object recognition: a mini-review</article-title>. <source>Vis. Res.</source> <volume>48</volume>, <fpage>635</fpage>&#x2013;<lpage>654</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.visres.2007.12.009</pub-id>, PMID: <pub-id pub-id-type="pmid">18226828</pub-id></citation></ref>
<ref id="ref37"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Levi</surname> <given-names>D. M.</given-names></name> <name><surname>Carney</surname> <given-names>T.</given-names></name></person-group> (<year>2009</year>). <article-title>Crowding in peripheral vision: why bigger is better</article-title>. <source>Curr. Biol.</source> <volume>19</volume>, <fpage>1988</fpage>&#x2013;<lpage>1993</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cub.2009.09.056</pub-id>, PMID: <pub-id pub-id-type="pmid">19853450</pub-id></citation></ref>
<ref id="ref38"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Levi</surname> <given-names>D. M.</given-names></name> <name><surname>Song</surname> <given-names>S.</given-names></name> <name><surname>Pelli</surname> <given-names>D. G.</given-names></name></person-group> (<year>2007</year>). <article-title>Amblyopic reading is crowded</article-title>. <source>J. Vis.</source> <volume>7</volume>:<elocation-id>21</elocation-id>, <fpage>1</fpage>&#x2013;<lpage>17</lpage>. doi: <pub-id pub-id-type="doi">10.1167/7.2.21</pub-id>, PMID: <pub-id pub-id-type="pmid">18217836</pub-id></citation></ref>
<ref id="ref39"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>Q.</given-names></name> <name><surname>Joo</surname> <given-names>S. J.</given-names></name> <name><surname>Yeatman</surname> <given-names>J. D.</given-names></name> <name><surname>Reinecke</surname> <given-names>K.</given-names></name></person-group> (<year>2020</year>). <article-title>Controlling for participants&#x2019; viewing distance in large-scale, psychophysical online experiments using a virtual chinrest</article-title>. <source>Sci. Rep.</source> <volume>10</volume>:<fpage>904</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-019-57204-1</pub-id>, PMID: <pub-id pub-id-type="pmid">31969579</pub-id></citation></ref>
<ref id="ref40"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Liebling</surname> <given-names>D. J.</given-names></name> <name><surname>Dumais</surname> <given-names>S. T.</given-names></name></person-group> (<year>2014</year>). Gaze and mouse coordination in everyday work. <italic>Proceedings of the 2014 ACM International Joint Conference on Pervasive and Ubiquitous Computing: Adjunct Publication</italic>, 1141&#x2013;1150. Available at: <ext-link xlink:href="https://doi.org/10.1145/2638728.2641692" ext-link-type="uri">https://doi.org/10.1145/2638728.2641692</ext-link></citation></ref>
<ref id="ref41"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Majaj</surname> <given-names>N. J.</given-names></name> <name><surname>Hong</surname> <given-names>H.</given-names></name> <name><surname>Solomon</surname> <given-names>E. A.</given-names></name> <name><surname>DiCarlo</surname> <given-names>J. J.</given-names></name></person-group> (<year>2015</year>). <article-title>Simple learned weighted sums of inferior temporal neuronal firing rates accurately predict human core object recognition performance</article-title>. <source>J. Neurosci.</source> <volume>35</volume>, <fpage>13402</fpage>&#x2013;<lpage>13418</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.5181-14.2015</pub-id></citation></ref>
<ref id="ref42"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Math&#x00F4;t</surname> <given-names>S.</given-names></name> <name><surname>Schreij</surname> <given-names>D.</given-names></name> <name><surname>Theeuwes</surname> <given-names>J.</given-names></name></person-group> (<year>2012</year>). <article-title>OpenSesame: an open-source, graphical experiment builder for the social sciences</article-title>. <source>Behav. Res. Methods</source> <volume>44</volume>, <fpage>314</fpage>&#x2013;<lpage>324</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13428-011-0168-7</pub-id>, PMID: <pub-id pub-id-type="pmid">22083660</pub-id></citation></ref>
<ref id="ref43"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>McGonagle</surname> <given-names>A. K.</given-names></name></person-group> (<year>2015</year>). <article-title>Participant motivation: a critical consideration</article-title>. <source>Ind. Organ. Psychol.</source> <volume>8</volume>, <fpage>208</fpage>&#x2013;<lpage>214</lpage>. doi: <pub-id pub-id-type="doi">10.1017/iop.2015.27</pub-id></citation></ref>
<ref id="ref44"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Morey</surname> <given-names>R. D.</given-names></name> <name><surname>Rouder</surname> <given-names>J. N.</given-names></name> <name><surname>Jamil</surname> <given-names>T.</given-names></name> <name><surname>Urbanek</surname> <given-names>S.</given-names></name> <name><surname>Forner</surname> <given-names>K.</given-names></name> <name><surname>Ly</surname> <given-names>A.</given-names></name></person-group> (<year>2023</year>). <italic>BayesFactor: computation of Bayes factors for common designs</italic> (0.9.12-4.5) [computer software]. Available at: <ext-link xlink:href="https://cran.r-project.org/web/packages/BayesFactor/index.html" ext-link-type="uri">https://cran.r-project.org/web/packages/BayesFactor/index.html</ext-link></citation></ref>
<ref id="ref45"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Najemnik</surname> <given-names>J.</given-names></name> <name><surname>Geisler</surname> <given-names>W. S.</given-names></name></person-group> (<year>2008</year>). <article-title>Eye movement statistics in humans are consistent with an optimal search strategy</article-title>. <source>J. Vis.</source> <volume>8</volume>, <fpage>4</fpage>&#x2013;<lpage>414</lpage>. doi: <pub-id pub-id-type="doi">10.1167/8.3.4</pub-id>, PMID: <pub-id pub-id-type="pmid">18484810</pub-id></citation></ref>
<ref id="ref46"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Niehorster</surname> <given-names>D. C.</given-names></name> <name><surname>Siu</surname> <given-names>W. W. F.</given-names></name> <name><surname>Li</surname> <given-names>L.</given-names></name></person-group> (<year>2015</year>). <article-title>Manual tracking enhances smooth pursuit eye movements</article-title>. <source>J. Vis.</source> <volume>15</volume>:<fpage>11</fpage>. doi: <pub-id pub-id-type="doi">10.1167/15.15.11</pub-id>, PMID: <pub-id pub-id-type="pmid">26605840</pub-id></citation></ref>
<ref id="ref47"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Otero-Millan</surname> <given-names>J.</given-names></name> <name><surname>Macknik</surname> <given-names>S. L.</given-names></name> <name><surname>Langston</surname> <given-names>R. E.</given-names></name> <name><surname>Martinez-Conde</surname> <given-names>S.</given-names></name></person-group> (<year>2013</year>). <article-title>An oculomotor continuum from exploration to fixation</article-title>. <source>Proc. Natl. Acad. Sci. U. S. A.</source> <volume>110</volume>, <fpage>6175</fpage>&#x2013;<lpage>6180</lpage>. doi: <pub-id pub-id-type="doi">10.1073/pnas.1222715110</pub-id>, PMID: <pub-id pub-id-type="pmid">23533278</pub-id></citation></ref>
<ref id="ref48"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Palan</surname> <given-names>S.</given-names></name> <name><surname>Schitter</surname> <given-names>C.</given-names></name></person-group> (<year>2018</year>). <article-title>Prolific.ac&#x2014;a subject pool for online experiments</article-title>. <source>J. Behav. Exp. Financ.</source> <volume>17</volume>, <fpage>22</fpage>&#x2013;<lpage>27</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jbef.2017.12.004</pub-id></citation></ref>
<ref id="ref49"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Papoutsaki</surname> <given-names>A.</given-names></name></person-group> (<year>2015</year>). Scalable webcam eye tracking by learning from user interactions. <italic>Proceedings of the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems</italic>, 219&#x2013;222. Available at: <ext-link xlink:href="https://doi.org/10.1145/2702613.2702627" ext-link-type="uri">https://doi.org/10.1145/2702613.2702627</ext-link></citation></ref>
<ref id="ref50"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Papoutsaki</surname> <given-names>A.</given-names></name> <name><surname>Sangkloy</surname> <given-names>P.</given-names></name> <name><surname>Laskey</surname> <given-names>J.</given-names></name> <name><surname>Daskalova</surname> <given-names>N.</given-names></name> <name><surname>Huang</surname> <given-names>J.</given-names></name> <name><surname>Hays</surname> <given-names>J.</given-names></name></person-group> (<year>2016</year>). <italic>WebGazer: Scalable webcam eye tracking using user interactions</italic>.</citation></ref>
<ref id="ref51"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pelli</surname> <given-names>D. G.</given-names></name> <name><surname>Palomares</surname> <given-names>M.</given-names></name> <name><surname>Majaj</surname> <given-names>N. J.</given-names></name></person-group> (<year>2004</year>). <article-title>Crowding is unlike ordinary masking: distinguishing feature integration from detection</article-title>. <source>J. Vis.</source> <volume>4</volume>, <fpage>1136</fpage>&#x2013;<lpage>1169</lpage>. doi: <pub-id pub-id-type="doi">10.1167/4.12.12</pub-id>, PMID: <pub-id pub-id-type="pmid">15669917</pub-id></citation></ref>
<ref id="ref52"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pelli</surname> <given-names>D. G.</given-names></name> <name><surname>Tillman</surname> <given-names>K. A.</given-names></name></person-group> (<year>2008</year>). <article-title>The uncrowded window of object recognition</article-title>. <source>Nat. Neurosci.</source> <volume>11</volume>, <fpage>1129</fpage>&#x2013;<lpage>1135</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nn.2187</pub-id>, PMID: <pub-id pub-id-type="pmid">18828191</pub-id></citation></ref>
<ref id="ref53"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pelli</surname> <given-names>D. G.</given-names></name> <name><surname>Tillman</surname> <given-names>K. A.</given-names></name> <name><surname>Freeman</surname> <given-names>J.</given-names></name> <name><surname>Su</surname> <given-names>M.</given-names></name> <name><surname>Berger</surname> <given-names>T. D.</given-names></name> <name><surname>Majaj</surname> <given-names>N. J.</given-names></name></person-group> (<year>2007</year>). <article-title>Crowding and eccentricity determine reading rate</article-title>. <source>J. Vis.</source> <volume>7</volume>:<fpage>20</fpage>. doi: <pub-id pub-id-type="doi">10.1167/7.2.20</pub-id></citation></ref>
<ref id="ref54"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pelli</surname> <given-names>D. G.</given-names></name> <name><surname>Waugh</surname> <given-names>S. J.</given-names></name> <name><surname>Martelli</surname> <given-names>M.</given-names></name> <name><surname>Crutch</surname> <given-names>S. J.</given-names></name> <name><surname>Primativo</surname> <given-names>S.</given-names></name> <name><surname>Yong</surname> <given-names>K. X.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>A clinical test for visual crowding</article-title>. <source>F1000Research</source> <volume>5</volume>:<fpage>81</fpage>. doi: <pub-id pub-id-type="doi">10.12688/f1000research.7835.1</pub-id></citation></ref>
<ref id="ref55"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Pitiot</surname> <given-names>A.</given-names></name> <name><surname>Agafonov</surname> <given-names>N.</given-names></name> <name><surname>Bakagiannis</surname> <given-names>S.</given-names></name> <name><surname>Pierce</surname> <given-names>J.</given-names></name> <name><surname>Pronk</surname> <given-names>T.</given-names></name> <name><surname>Sogo</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2017</year>). <italic>PsychoJS</italic> [JavaScript]. PsychoPy. Available at: <ext-link xlink:href="https://github.com/psychopy/psychojs" ext-link-type="uri">https://github.com/psychopy/psychojs</ext-link></citation></ref>
<ref id="ref56"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rolfs</surname> <given-names>M.</given-names></name> <name><surname>Schweitzer</surname> <given-names>R.</given-names></name></person-group> (<year>2022</year>). <article-title>Coupling perception to action through incidental sensory consequences of motor behaviour</article-title>. <source>Nat. Rev. Psychol.</source> <volume>1</volume>, <fpage>112</fpage>&#x2013;<lpage>123</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s44159-021-00015-x</pub-id></citation></ref>
<ref id="ref57"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Semmelmann</surname> <given-names>K.</given-names></name> <name><surname>Weigelt</surname> <given-names>S.</given-names></name></person-group> (<year>2018</year>). <article-title>Online webcam-based eye tracking in cognitive science: a first look</article-title>. <source>Behav. Res. Methods</source> <volume>50</volume>, <fpage>451</fpage>&#x2013;<lpage>465</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13428-017-0913-7</pub-id>, PMID: <pub-id pub-id-type="pmid">28593605</pub-id></citation></ref>
<ref id="ref58"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Slim</surname> <given-names>M. S.</given-names></name> <name><surname>Hartsuiker</surname> <given-names>R. J.</given-names></name></person-group> (<year>2022</year>). <article-title>Moving visual world experiments online? A web-based replication of Dijkgraaf, Hartsuiker, and Duyck (2017) using PCIbex and WebGazer.js</article-title>. <source>Behav. Res. Methods</source> <volume>55</volume>, <fpage>3786</fpage>&#x2013;<lpage>3804</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13428-022-01989-z</pub-id></citation></ref>
<ref id="ref59"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sloan</surname> <given-names>L. L.</given-names></name> <name><surname>Rowland</surname> <given-names>W. M.</given-names></name> <name><surname>Altman</surname> <given-names>A.</given-names></name></person-group> (<year>1952</year>). <article-title>Comparison of three types of test target for the measurement of visual acuity</article-title>. <source>Q. Rev. Ophthalmol.</source> <volume>8</volume>, <fpage>4</fpage>&#x2013;<lpage>16</lpage>.</citation></ref>
<ref id="ref60"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Smith</surname> <given-names>S. M.</given-names></name> <name><surname>Roster</surname> <given-names>C. A.</given-names></name> <name><surname>Golden</surname> <given-names>L. L.</given-names></name> <name><surname>Albaum</surname> <given-names>G. S.</given-names></name></person-group> (<year>2016</year>). <article-title>A multi-group analysis of online survey respondent data quality: comparing a regular USA consumer panel to MTurk samples</article-title>. <source>J. Bus. Res.</source> <volume>69</volume>, <fpage>3139</fpage>&#x2013;<lpage>3148</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jbusres.2015.12.002</pub-id></citation></ref>
<ref id="ref61"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Strasburger</surname> <given-names>H.</given-names></name></person-group> (<year>2020</year>). <article-title>Seven myths on crowding and peripheral vision</article-title>. <source>I-Perception</source> <volume>11</volume>:<fpage>2041669520913052</fpage>. doi: <pub-id pub-id-type="doi">10.1177/2041669520913052</pub-id>, PMID: <pub-id pub-id-type="pmid">32489576</pub-id></citation></ref>
<ref id="ref62"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Strasburger</surname> <given-names>H.</given-names></name> <name><surname>Rentschler</surname> <given-names>I.</given-names></name> <name><surname>J&#x00FC;ttner</surname> <given-names>M.</given-names></name></person-group> (<year>2011</year>). <article-title>Peripheral vision and pattern recognition: a review</article-title>. <source>J. Vis.</source> <volume>11</volume>:<fpage>13</fpage>. doi: <pub-id pub-id-type="doi">10.1167/11.5.13</pub-id>, PMID: <pub-id pub-id-type="pmid">22207654</pub-id></citation></ref>
<ref id="ref63"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Stuart</surname> <given-names>J. A.</given-names></name> <name><surname>Burian</surname> <given-names>H. M.</given-names></name></person-group> (<year>1962</year>). <article-title>A study of separation difficulty: its relationship to visual acuity in normal and amblyopic eyes</article-title>. <source>Am. J. Ophthalmol.</source> <volume>53</volume>, <fpage>471</fpage>&#x2013;<lpage>477</lpage>. doi: <pub-id pub-id-type="doi">10.1016/0002-9394(62)94878-X</pub-id></citation></ref>
<ref id="ref64"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Toet</surname> <given-names>A.</given-names></name> <name><surname>Levi</surname> <given-names>D. M.</given-names></name></person-group> (<year>1992</year>). <article-title>The two-dimensional shape of spatial interaction zones in the parafovea</article-title>. <source>Vis. Res.</source> <volume>32</volume>, <fpage>1349</fpage>&#x2013;<lpage>1357</lpage>. doi: <pub-id pub-id-type="doi">10.1016/0042-6989(92)90227-A</pub-id>, PMID: <pub-id pub-id-type="pmid">1455707</pub-id></citation></ref>
<ref id="ref65"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Valliappan</surname> <given-names>N.</given-names></name> <name><surname>Dai</surname> <given-names>N.</given-names></name> <name><surname>Steinberg</surname> <given-names>E.</given-names></name> <name><surname>He</surname> <given-names>J.</given-names></name> <name><surname>Rogers</surname> <given-names>K.</given-names></name> <name><surname>Ramachandran</surname> <given-names>V.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Accelerating eye movement research via accurate and affordable smartphone eye tracking</article-title>. <source>Nat. Commun.</source> <volume>11</volume>:<fpage>4553</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41467-020-18360-5</pub-id>, PMID: <pub-id pub-id-type="pmid">32917902</pub-id></citation></ref>
<ref id="ref66"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wallace</surname> <given-names>J. M.</given-names></name> <name><surname>Chung</surname> <given-names>S. T. L.</given-names></name> <name><surname>Tjan</surname> <given-names>B. S.</given-names></name></person-group> (<year>2017</year>). <article-title>Object crowding in age-related macular degeneration</article-title>. <source>J. Vis.</source> <volume>17</volume>:<fpage>33</fpage>. doi: <pub-id pub-id-type="doi">10.1167/17.1.33</pub-id>, PMID: <pub-id pub-id-type="pmid">28129416</pub-id></citation></ref>
<ref id="ref67"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Watson</surname> <given-names>A. B.</given-names></name> <name><surname>Pelli</surname> <given-names>D. G.</given-names></name></person-group> (<year>1983</year>). <article-title>Quest: a Bayesian adaptive psychometric method</article-title>. <source>Percept. Psychophys.</source> <volume>33</volume>, <fpage>113</fpage>&#x2013;<lpage>120</lpage>. doi: <pub-id pub-id-type="doi">10.3758/BF03202828</pub-id>, PMID: <pub-id pub-id-type="pmid">6844102</pub-id></citation></ref>
<ref id="ref68"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xia</surname> <given-names>R.</given-names></name> <name><surname>Barnes</surname> <given-names>G.</given-names></name></person-group> (<year>1999</year>). <article-title>Oculomanual coordination in tracking of pseudorandom target motion stimuli</article-title>. <source>J. Mot. Behav.</source> <volume>31</volume>, <fpage>21</fpage>&#x2013;<lpage>38</lpage>. doi: <pub-id pub-id-type="doi">10.1080/00222899909601889</pub-id>, PMID: <pub-id pub-id-type="pmid">11177617</pub-id></citation></ref>
<ref id="ref69"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>P.</given-names></name> <name><surname>Ehinger</surname> <given-names>K. A.</given-names></name> <name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Finkelstein</surname> <given-names>A.</given-names></name> <name><surname>Kulkarni</surname> <given-names>S. R.</given-names></name> <name><surname>Xiao</surname> <given-names>J.</given-names></name></person-group> (<year>2015</year>). <article-title>TurkerGaze: crowdsourcing saliency with webcam based eye tracking</article-title>. <source>arXiv</source>. doi: <pub-id pub-id-type="doi">10.48550/arXiv.1504.06755</pub-id></citation></ref>
</ref-list>
</back>
</article>