<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="en" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Psychol.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Psychology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Psychol.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1664-1078</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpsyg.2026.1625415</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Anticipatory gaze in a reaching-and-grasping task when target movement direction is uncertain: evidence of statistical learning</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Ekladuce</surname> <given-names>Youssef G.</given-names></name>
<xref ref-type="aff" rid="aff1"/>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/3054562/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Langridge</surname> <given-names>Ryan W.</given-names></name>
<xref ref-type="aff" rid="aff1"/>
<uri xlink:href="http://loop.frontiersin.org/people/1764720/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Marotta</surname> <given-names>Jonathan J.</given-names></name>
<xref ref-type="aff" rid="aff1"/>
<uri xlink:href="http://loop.frontiersin.org/people/69294/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><institution>Perception and Action Lab, Department of Psychology, University of Manitoba</institution>, <city>Winnipeg, MB</city>, <country country="CA">Canada</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Youssef G. Ekladuce, <email xlink:href="mailto:ekladucy@myumanitoba.ca">ekladucy@myumanitoba.ca</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-16">
<day>16</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>17</volume>
<elocation-id>1625415</elocation-id>
<history>
<date date-type="received">
<day>08</day>
<month>05</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>25</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>26</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Ekladuce, Langridge and Marotta.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Ekladuce, Langridge and Marotta</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-16">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Successful goal-directed actions in dynamic environments depend on the brain&#x2019;s ability to implicitly learn probabilistic regularities, enabling anticipatory behaviors and reducing cognitive demands. Humans can utilize visual and non-visual aspects of the target to predict its direction. We investigated whether individuals could implicitly exploit a target&#x2019;s directional movement history during a grasping task to predict its future direction, under uncertain conditions, lacking any clues about the target&#x2019;s eventual movement. In each trial, a target appeared in the middle of the screen, remained stationary for 2.50 s, then translated horizontally toward the left or the right. Participants first completed a non-biased block of trials, before completing either two rightward or leftward biased blocks. Upon exposure to the biased blocks, participants&#x2019; average gaze shifted toward the direction of the eventual movement of the target prior to its movement, suggesting an adaptation in gaze strategy as the experiment progressed. Later trials showed a greater distance from the target&#x2019;s horizontal midline compared to earlier trials. Furthermore, anticipatory behavior facilitated more efficient reactive gaze adjustments once the target began moving, enabling participants to align their gaze more closely with the target and thus reducing cognitive load. This study highlights the visuomotor system&#x2019;s ability to implicitly use probabilistic patterns, enhancing anticipatory and reactive gaze strategies that improve the execution of goal-directed actions.</p>
</abstract>
<kwd-group>
<kwd>anticipatory gaze</kwd>
<kwd>directionality bias</kwd>
<kwd>reaching-to-grasp</kwd>
<kwd>uncertainty</kwd>
<kwd>visuomotor control</kwd>
</kwd-group>
<funding-group>
<award-group id="gs1">
<funding-source id="sp1">
<institution-wrap>
<institution>Natural Sciences and Engineering Research Council of Canada</institution>
<institution-id institution-id-type="doi" vocab="open-funder-registry" vocab-identifier="10.13039/open_funder_registry">10.13039/501100000038</institution-id>
</institution-wrap>
</funding-source>
</award-group>
<award-group id="gs2">
<funding-source id="sp2">
<institution-wrap>
<institution>Research Manitoba</institution>
<institution-id institution-id-type="doi" vocab="open-funder-registry" vocab-identifier="10.13039/open_funder_registry">10.13039/100008794</institution-id>
</institution-wrap>
</funding-source>
</award-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This research was supported by the NSERC Discovery Grant (RGPIN-2018-04964; held by JM) and the Research Manitoba Master&#x2019;s Studentship Award (held by YE).</funding-statement>
</funding-group>
<counts>
<fig-count count="6"/>
<table-count count="1"/>
<equation-count count="0"/>
<ref-count count="79"/>
<page-count count="15"/>
<word-count count="12499"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Movement Science</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="S1" sec-type="intro">
<label>1</label>
<title>Introduction</title>
<p>Goal-directed behaviors, such as grasping a moving object, require the brain to process visual information to estimate the object&#x2019;s position and velocity at the time of contact (<xref ref-type="bibr" rid="B11">Carnahan et al., 1993</xref>; <xref ref-type="bibr" rid="B32">Hesse et al., 2016</xref>; <xref ref-type="bibr" rid="B64">Paninski et al., 2004</xref>; <xref ref-type="bibr" rid="B77">Todd, 1981</xref>). For instance, when attempting to catch a ball, a soccer goalie must anticipate the direction that the ball will be kicked and initiate movements accordingly. However, through extensive practice, these anticipatory actions become increasingly automatic, allowing the goalie to exploit probabilistic estimations about the ball&#x2019;s trajectory, thereby optimizing their predictive accuracy (<xref ref-type="bibr" rid="B50">Malla and L&#x00F3;pez-Moliner, 2015</xref>; <xref ref-type="bibr" rid="B52">Mennie et al., 2007</xref>; <xref ref-type="bibr" rid="B74">Spering et al., 2011</xref>). Since our visual environment contains many statistical regularities (<xref ref-type="bibr" rid="B78">Turk-Browne, 2012</xref>), the brain&#x2019;s capacity to implicitly learn these regularities enables the operation of actions, reducing cognitive load and enhancing the likelihood of successful goal-directed behavior, particularly in dynamic and uncertain situations (<xref ref-type="bibr" rid="B51">Masters and Maxwell, 2008</xref>).</p>
<p>The two-visual-stream hypothesis proposes that visual processing involves two functionally independent neural pathways, a dorsal visual stream and a ventral visual stream (<xref ref-type="bibr" rid="B30">Goodale et al., 1991</xref>; <xref ref-type="bibr" rid="B29">Goodale and Milner, 1992</xref>; <xref ref-type="bibr" rid="B27">Goodale, 2011</xref>, <xref ref-type="bibr" rid="B28">2013</xref>). The dorsal visual stream projects to the posterior parietal cortex and processes visual information serving the representations needed for visually guided actions and is largely unconscious. In contrast, the ventral stream, projecting to the inferior temporal cortex, processes information consciously, relying on memory to support stable perceptual representations of the world around us (<xref ref-type="bibr" rid="B30">Goodale et al., 1991</xref>; <xref ref-type="bibr" rid="B29">Goodale and Milner, 1992</xref>; <xref ref-type="bibr" rid="B27">Goodale, 2011</xref>; <xref ref-type="bibr" rid="B35">Jakobson et al., 1991</xref>; <xref ref-type="bibr" rid="B53">Milner and Goodale, 2006</xref>; <xref ref-type="bibr" rid="B54">Miyashita, 1993</xref>). Despite their functional independence, research suggests that various types of goal-directed behavior require the communication of the dorsal and ventral visual streams. These behaviors include tool use (<xref ref-type="bibr" rid="B7">Brandi et al., 2014</xref>; <xref ref-type="bibr" rid="B9">Budisavljevic et al., 2018</xref>; <xref ref-type="bibr" rid="B20">Dressing et al., 2018</xref>), delayed grasping (<xref ref-type="bibr" rid="B14">Cohen et al., 2009</xref>; <xref ref-type="bibr" rid="B72">Singhal et al., 2013</xref>), and engaging with two-dimensional objects (<xref ref-type="bibr" rid="B24">Freud et al., 2018</xref>). 
These findings are significant as they suggest that in certain contexts, the non-conscious dorsal stream can access ventrally mediated conscious memory systems, gaining information not typically accessible to the dorsal stream alone, but that may aid visually guided actions, such as the ability to utilize an object&#x2019;s history of movement to predict its future movement (<xref ref-type="bibr" rid="B39">Keizer et al., 2008</xref>; <xref ref-type="bibr" rid="B55">Monaco et al., 2019</xref>; <xref ref-type="bibr" rid="B66">Perry and Fallah, 2014</xref>).</p>
<p>Anticipating the direction of a moving object involves two steps: first, one must be able to identify the trajectory of the object (<xref ref-type="bibr" rid="B46">Krekelberg and Lappe, 1999</xref>; <xref ref-type="bibr" rid="B56">Montagne et al., 1999</xref>; <xref ref-type="bibr" rid="B59">M&#x00FC;ller and Abernethy, 2006</xref>; <xref ref-type="bibr" rid="B58">M&#x00FC;ller et al., 2006</xref>). Second, one must be able to look ahead into that path to maximize the chances of successfully catching the object (<xref ref-type="bibr" rid="B10">Bulloch et al., 2015</xref>; <xref ref-type="bibr" rid="B74">Spering et al., 2011</xref>; <xref ref-type="bibr" rid="B76">Thulasiram et al., 2020</xref>). <xref ref-type="bibr" rid="B58">M&#x00FC;ller et al. (2006)</xref> examined how elite cricket batsmen anticipate a bowler&#x2019;s delivery intentions in their sport. Their findings suggest that skilled players rely heavily on early visual cues, such as the bowler&#x2019;s kinematic cues to predict the ball&#x2019;s trajectory. Thus, this ability to anticipate based on early visual information is critical for intercepting a moving object in dynamic environments (<xref ref-type="bibr" rid="B58">M&#x00FC;ller et al., 2006</xref>).</p>
<p>Once the trajectory of a moving object is anticipated, one could fixate ahead in the path of the object prior to grasping it. <xref ref-type="bibr" rid="B10">Bulloch et al. (2015)</xref> conducted a study to examine the effects of manipulating the direction of movement of two-dimensional targets on gaze behavior. To do so, participants were asked to reach and grasp a two-dimensional target that appeared on the leftward or rightward edge of the screen and translated horizontally. Results revealed that participants fixated about 7 cm ahead of the leading edge of the target prior to the onset of its movement (<xref ref-type="bibr" rid="B10">Bulloch et al., 2015</xref>). The researchers attributed these results to participants&#x2019; attempts to eliminate the &#x201C;cognitive effort&#x201D; that must be exerted to catch up to and continue following a target that has already begun moving (<xref ref-type="bibr" rid="B10">Bulloch et al., 2015</xref>). Similar results were observed in a study that utilized vertically moving two-dimensional targets (<xref ref-type="bibr" rid="B76">Thulasiram et al., 2020</xref>). Therefore, anticipating the direction and trajectory of a moving object appears to benefit visual pursuit of that object, and is crucially dependent on the ability to interpret and react to early visual information.</p>
<p>Anticipating the trajectory of a moving object could be achieved through the processing of more than just the low-level visual cues of the object&#x2019;s movement. For instance, <xref ref-type="bibr" rid="B18">Diaz et al. (2013)</xref> found that when individuals are asked to hit a bouncing ball with a racquet, they perform anticipatory saccades in the future trajectory of the bounced ball prior to its bounce. This finding persisted even when the speed and elasticity of the ball were changed before it bounced, suggesting that participants could utilize information beyond what was immediately available to them to guide elements of the goal-directed behavior (<xref ref-type="bibr" rid="B18">Diaz et al., 2013</xref>). Similar anticipatory behavior has been observed in more naturalistic contexts, such as squash, where participants look ahead in the post-bounce trajectory of the ball before it contacts the wall (<xref ref-type="bibr" rid="B31">Hayhoe et al., 2012</xref>). In addition, <xref ref-type="bibr" rid="B1">Aglioti et al. (2008)</xref> demonstrated that elite basketball players can predict the fate of free-throw shots earlier and more accurately than visually experienced non-athletes by relying on subtle kinematic cues from the shooter&#x2019;s body before the ball leaves the hand. Their motor system shows time-specific corticospinal modulation when observing erroneous shots, suggesting that extensive motor experience refines anticipatory mechanisms that support predicting the trajectory of the ball (<xref ref-type="bibr" rid="B1">Aglioti et al., 2008</xref>). Thus, it is evident that the visuomotor system relies on extensive, higher-level information, such as angle of incidence, body configuration of the actor, and future direction of movement, to anticipate the trajectory of a moving target. 
However, the extent to which the visuomotor system can navigate uncertain environments, where the only available information about the target is its statistical pattern of previous movement, is not yet known. In other words, can the visuomotor system utilize environmental statistics to produce anticipatory behavior when guiding actions?</p>
<p>There is a growing body of evidence that suggests that statistically regular events in our environment lead to the production of anticipatory behavior in perceptual contexts. <xref ref-type="bibr" rid="B60">Notaro et al. (2019)</xref> explored the impact of input statistics on anticipatory behavior using a statistical learning paradigm. By manipulating the probability of target location to be either 70% on the same side as the previous trial or 30%, they found that participants&#x2019; anticipatory saccades were biased toward the same side in the high-probability condition and toward the opposite side in the low-probability condition. These results indicate that participants adjusted their anticipatory gaze based on the pattern-based regularities provided, demonstrating that predictive oculomotor behavior can be shaped by learned statistical regularities and expressed prior to stimulus onset, independent of explicit directional cues. Recent findings further show that implicitly learned event-context associations can bias perceptual predictions. For example, <xref ref-type="bibr" rid="B5">Betti et al. (2022)</xref> used a probabilistic learning design in which contextual cues, such as color, were paired with actions or shapes at different co-occurrence probabilities, creating high-expectancy <italic>(stronger learned association)</italic> and low-expectancy <italic>(weaker learned association)</italic> contexts. Results showed that the learned contextual priors biased the participants&#x2019; predictions when asked to disambiguate events, particularly when visual information was limited and the contextual cue was more strongly predictive (<xref ref-type="bibr" rid="B5">Betti et al., 2022</xref>).</p>
<p>Additional studies have shown that probabilistic regularities also shape anticipatory eye movements during continuous visual tracking. For example, <xref ref-type="bibr" rid="B16">Damasse et al. (2018)</xref> investigated the influence of probabilistic information on anticipatory smooth pursuit eye movements by asking participants to track a moving target that appeared in the middle of the screen and translated horizontally toward the right or the left side of the screen. The frequency of the target&#x2019;s movement to the right or left was manipulated, and participants exhibited a marked increase in anticipatory smooth eye velocities, reflecting a systematic adaptation to the direction bias (<xref ref-type="bibr" rid="B16">Damasse et al., 2018</xref>). Additional studies have also demonstrated that gaze-directed adaptations consistently emerge from implicit learning of directional frequency (e.g., <xref ref-type="bibr" rid="B57">Montagnini et al., 2010</xref>; <xref ref-type="bibr" rid="B68">Santos and Kowler, 2017</xref>; for review, please see <xref ref-type="bibr" rid="B22">Fiehler et al., 2019</xref>), suggesting the visual system is sensitive to probabilistic information regarding stimulus movement. Importantly, however, these studies have demonstrated humans&#x2019; sensitivity to directionality bias and the resulting production of anticipatory gaze behavior, in perceptual contexts that did not require goal-directed action, such as grasping the moving stimulus.</p>
<sec id="S1.SS1">
<label>1.1</label>
<title>Objectives and hypotheses</title>
<p>Often, we pursue the movement of a target stimulus to intercept it, such as the goalie catching a soccer ball. Yet, it remains unknown whether the previously observed ability to exploit a target&#x2019;s directionality bias to produce oculomotor anticipatory behavior is demonstrated when individuals are asked to reach to and grasp the moving target, in addition to simply tracking its eventual movement. In other words, can the visuomotor system effectively leverage implicit probabilistic information acquired from previous experiences to facilitate anticipatory behavior during the planning and execution of goal-directed actions?</p>
<p>The purpose of the present study was to investigate the visuomotor system&#x2019;s ability to utilize the directionality bias of a moving target based on its movement history, to produce anticipatory gaze behavior when performing a reaching and grasping task. Participants were required to reach and grasp a horizontally moving target. In each trial, the target initially appeared at the center of the screen and then translated horizontally toward the right or the left. The horizontal movement of the target was manipulated so that in most trials, the target &#x201C;preferentially favored&#x201D; one direction, establishing a directionality bias. Participants were randomly assigned to one of two groups: a Rightward Bias group, in which the target more often moved to the right, and a Leftward Bias group, in which it more often moved to the left. Using biases in both directions allowed us to attribute any anticipatory gaze shifts to the experimental manipulation rather than to inherent asymmetries in spatial attention. This was important because leftward anticipatory gaze alone could be confounded with pseudoneglect, the tendency for neurotypical adults to allocate attention disproportionately toward the left visual field due to right-hemisphere lateralization of the parieto-frontal network (<xref ref-type="bibr" rid="B75">Thiebaut de Schotten et al., 2011</xref>). Finally, a between-subjects design was necessary to prevent participants from learning or carrying over the directionality bias across conditions.</p>
<p>Considering the literature demonstrating anticipatory gaze behavior during the pursuit of a moving target, and the potential for the memory of environmental probabilities stored within the ventral stream to be communicated to the dorsal stream&#x2019;s guidance of grasping behavior, it was predicted that the visuomotor system would be sensitive to the directionality bias of a moving target, leading to production of anticipatory gaze behavior in the direction of the bias. More specifically, we hypothesized that in the absence of a directionality-bias (target moved an equal number of times toward the right and the left), participants would not demonstrate anticipatory gaze behavior but maintain an average gaze toward the horizontal midline of the stationary target (<xref ref-type="bibr" rid="B17">Desanghere and Marotta, 2011</xref>) due to the uncertain direction of its eventual movement. In contrast, in the presence of a directionality bias, we hypothesized that participants&#x2019; gaze behavior would shift toward the anticipated leading edge of the target while stationary. For instance, participants in the right-directionality-bias (Rightward Bias) group would direct their gaze toward the right edge of the target prior to the onset of its movement, with the opposite pattern being observed in the left-directionality-bias (Leftward Bias) group. Furthermore, we hypothesized that as participants complete more directionality-biased trials, anticipatory behavior would increase in magnitude, and participants&#x2019; gaze would shift farther away from the midline of the target, toward the anticipated leading edge, thus suggesting that learning of the probabilistic patterns had taken place.</p>
<p>In addition, we hypothesized that anticipatory gaze behavior would systematically shape subsequent oculomotor reactionary behavior once the target began moving. Specifically, we predicted that greater anticipatory gaze in the direction of the eventual target movement would reduce the spatial and temporal demands on initial reactive eye movements, such that the amplitude and duration of the first catch-up saccade following target movement onset would decrease as anticipatory behavior increased, and that the first fixation following this saccade would fall closer to, or slightly ahead of, the target&#x2019;s midline, reflecting more efficient tracking. Finally, we hypothesized that these changes in oculomotor behavior would be accompanied by adaptations in movement kinematics across the two biased trial blocks: as participants learned and exploited the directionality bias, reaching movements were expected to become more efficient, characterized by subtle adjustments in peak reaching velocity, reach duration, and reach latency that would reflect a reduced need for corrective movements once the target was in motion.</p>
</sec>
</sec>
<sec id="S2" sec-type="materials|methods">
<label>2</label>
<title>Materials and methods</title>
<sec id="S2.SS1">
<label>2.1</label>
<title>Participants</title>
<p>Fifty individuals (28 female) between the ages of 17 and 34 years (<italic>M</italic> = 19.90, <italic>SD</italic> = 3.20) were recruited to participate in this study. An <italic>a priori</italic> power analysis was conducted using G&#x002A;Power (version 3.1.9.7) to determine the required sample size. Assuming a mixed-design ANOVA with a within-between interaction, a medium effect size (<italic>f</italic> = 0.25), an alpha level of 0.05, and desired power of 0.90, the analysis indicated a minimum sample size of 46 participants. To account for potential data loss and to ensure adequate statistical power, a total of 50 participants were recruited.</p>
<p>The Rightward Bias group consisted of 10 males and 15 females between the ages of 17 and 34 years (<italic>M</italic> = 20.36, <italic>SD</italic> = 3.91). The Leftward Bias group consisted of 12 males and 13 females between the ages of 18 and 26 years (<italic>M</italic> = 19.44, <italic>SD</italic> = 2.26). Overall, of the 50 participants, 46 were recruited through the Psychology Department Undergraduate Participant Pool at the University of Manitoba and received course credit toward their Introductory Psychology course. The remaining four participants were recruited from the general student body at the University of Manitoba and received a CAD&#x0024;10 Starbucks gift card in exchange for their participation. All experimental procedures conformed to the guidelines of the Declaration of Helsinki. This study was approved by the Research Ethics Board at the University of Manitoba, Fort Garry.</p>
<p>All participants reported to be right-hand dominant, which was verified by a modified version of the Edinburgh Handedness Inventory (<xref ref-type="bibr" rid="B62">Oldfield, 1971</xref>). In addition, all participants had normal or corrected-to-normal vision, and English as their native reading language. English as the native reading language was required for participation to control for any potential spatial biases, which may change based on an individual&#x2019;s native reading language (right-to-left vs. left-to-right; <xref ref-type="bibr" rid="B6">Bowers and Heilman, 1980</xref>; <xref ref-type="bibr" rid="B13">Chokron et al., 1998</xref>; <xref ref-type="bibr" rid="B73">Smith et al., 2015</xref>).</p>
</sec>
<sec id="S2.SS2">
<label>2.2</label>
<title>Materials and procedure</title>
<p>MotionMonitor xGen software (Innovative Sports Training Inc., Chicago, IL, United States) was utilized to generate a two-dimensional target that was 7.00 &#x00D7; 3.50 cm in size (8.02 deg &#x00D7; 4.01 deg), gray in color and moved at a speed of 15 cm/s (17.19 deg/s). The target was displayed on a Dell U2414H 24-in. computer monitor, with a resolution of 1,080p and a refresh rate of 60 Hz. The monitor was placed 50 cm away from the participant.</p>
<p>To track participants&#x2019; hand movements, two wired infrared emitting diodes (IREDs) were placed on the index finger, two on the thumb, and two on the distal radius of the wrist, on their right hand. The four IREDs placed on the index finger and thumb were positioned 0.50 cm away from the fingertips. The IRED wires were made of flexible, light-weight material and were taped to the participants&#x2019; shoulders to prevent any interference throughout the study. Gaze position was recorded by an EyeLink II head-mounted eye-tracking system (250 Hz sampling rate, spatial resolution &#x003C; 0.05 deg; SR research Ltd., Mississauga Ontario, Canada). Three additional IREDs were placed on the EyeLink II headset to track the movements of the head. IRED movements were captured using an Optotrak Certus 3-D motion tracking system (100 Hz sampling rate, spatial accuracy up to 0.01 mm; Northern Digital Inc., Waterloo, Ontario, Canada). The eye-tracking system was calibrated using a 9-point calibration system. Since the EyeLink II head-mounted eye-tracking system and the Optotrak Certus 3-D motion tracking system recorded at two different frequencies, data produced by these two systems were integrated into a common frame of reference using MotionMonitor for analysis.</p>
<p>As illustrated in <xref ref-type="fig" rid="F1">Figure 1</xref>, each trial began when the target block appeared. The target remained stationary in the middle of the screen for approximately 2.5 s. After 2.5 s, the target began moving horizontally. Three seconds after the beginning of the trial, a 450 Hz tone was played for a duration of 250 ms. The tone served as a &#x201C;go&#x201D; signal that directed the participant to reach and grasp the moving target &#x201C;as quickly and naturally as possible&#x201D; using their index finger and thumb. Once grasped, the target stopped in the position in which it was grasped for 2 s, and then it disappeared, indicating the end of the trial. To ensure that the target behaved in a manner like a moving three-dimensional object, the target was programed to stop its movement once the distance of one of the two IREDs placed on the index finger reached 0.50 cm from the screen. Since the IREDs were placed approximately 0.50 cm from the tip of the index finger, the target stopped at the same moment the index finger touched the screen, mimicking a real 3D moving object that stops when grasped.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption><p>An example of a rightward-moving trial. On each trial, the target appeared in the middle of the screen. After 2.5 s, the target began translating horizontally. A tone was played 0.5 s after target movement onset, prompting participants to begin reaching and grasping the target.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyg-17-1625415-g001.tif">
<alt-text content-type="machine-generated">Diagram showing four sequential black rectangles, each with a centered gray rectangle, labeled at 0, 2.5, and 3.0 seconds, with a speaker icon at 3.0 seconds. The final panel shows a right hand reaching to touch the rectangle as the trial ends. An arrow points diagonally downward, indicating the sequence.</alt-text>
</graphic>
</fig>
<p>Participants first completed a no-bias block of trials, in which the target translated horizontally toward the left in 50% of the trials and toward the right in the other 50% of trials, in a pseudo-randomized order. There were 24 experimental trials (12 leftward-moving and 12 rightward-moving) and 8 timing distractor trials (4 leftward-moving and 4 rightward-moving) in the no-bias block. Timing distractor trials were added to increase variability between trials to ensure that participants remained attentive throughout the entire study but were not included in the analysis. In the timing distractor trials, the timing of the &#x201C;go&#x201D; tone was manipulated so that it was presented at the same time the target started to move. Following the no-bias block, participants completed two &#x201C;biased&#x201D; blocks, where the directionality bias of the target was set to 75% in the biased direction and 25% in the opposite direction. There were 24 experimental trials in each biased block, 18 of them moved in either the right or left direction, depending on the group the participant was assigned to, while the remaining 6 trials moved in the opposite direction (3:1 ratio of biased to non-biased direction trials). In addition, there were 8 timing distractor trials in the biased group conditions. The target moved toward the direction of the bias in 6 of the timing distractor trials, and in the opposite direction in the remaining 2 trials (3:1 ratio of biased to non-biased distractor trials).</p>
<p>The order in which the trials were organized was pseudorandomized. In the no-bias block, no more than two trials involving target movement in the same direction occurred consecutively. In the two biased blocks, trials moving in the &#x201C;non-biased&#x201D; direction did not appear consecutively. The experimental trials within each block of trials were averaged to create a mean gaze deviation, per block for each participant. Gaze deviation refers to the horizontal displacement of the participant&#x2019;s gaze relative to the target&#x2019;s midline during the anticipatory period.</p>
<p>Each data collection session began with participants signing a consent form detailing information regarding the study. Participants then completed a brief questionnaire reporting age, normal vision, and handedness. Participants were seated on a height-adjustable chair with their chin placed in a chinrest, to keep their heads steady for the duration of the study. Participants were fitted with the tracking equipment and the system calibration and validation commenced. To ensure proper calibration of the system, a calibration check was conducted, where participants were required to stare at a 2 mm dot presented in the middle of the screen for a period of 8 s. Acceptable gaze-error ranges were 0.5 cm or less for the X-coordinate and 1.0 cm or less for the Y-coordinate. Similar error ranges have previously been utilized by <xref ref-type="bibr" rid="B47">Langridge and Marotta (2017</xref>, <xref ref-type="bibr" rid="B48">2020</xref>, <xref ref-type="bibr" rid="B49">2021)</xref>, <xref ref-type="bibr" rid="B10">Bulloch et al. (2015)</xref>, and <xref ref-type="bibr" rid="B76">Thulasiram et al. (2020)</xref>. If the Eyelink calibration check indicated error rates outside of the accepted margin of error, the Eyelink was recalibrated. To ensure accuracy, Eyelink calibration checks were conducted every 16 trials (twice/block; six calibration checks over the entire data collection session). Each session lasted roughly 90 min.</p>
</sec>
<sec id="S2.SS3">
<label>2.3</label>
<title>Data setup and analyses</title>
<p>Oculomotor behaviors of interest in this study were categorized into one of two groups: anticipatory behavior and reactive behavior. Anticipatory behavior is defined as behavior occurring prior to the target&#x2019;s movement onset, the period of the trial during which the target was present, but its eventual movement direction was not known. Reactive behavior, in contrast, is defined as behavior occurring after the target has begun moving and participants have perceived its movement direction. This categorization allowed us to achieve two goals: examining the influence of directionality bias on anticipatory behavior and investigating how reactive behavior differs when anticipatory behavior changes.</p>
<p>Since we were interested in analyzing the influence of the directionality bias manipulation on participants&#x2019; oculomotor behavior, we first wanted to ensure that any initial bias participants may have had for any reason other than the experimental manipulation was measured and considered. To ensure that there were no significant differences between groups or within blocks for gaze measurement accuracy, a 2 (Directionality Bias Group: leftward or rightward) &#x00D7; 3 (Trial Block: unbiased block, biased block 1, and biased block 2) mixed analysis of variance (ANOVA) was conducted, with directionality bias as a between-subjects factor, and trial block as a within-subject factor on average displacement error as the dependent variable. Average displacement error is defined as the value of the observed error rate obtained at each calibration check, averaged. Moreover, a second 2 (Directionality Bias Group: leftward or rightward) &#x00D7; 3 (Trial Block: unbiased block, biased block 1, and biased block 2) mixed ANOVA was conducted, with directionality bias as a between-subjects factor, and trial block as a within-subject factor on average absolute error as the dependent variable. Average absolute error is defined as the absolute value of observed error rate obtained at each calibration check, averaged. All gaze measurement accuracy metrics were computed based on the horizontal eye position, as the experimental manipulation and target motion were restricted to the horizontal dimension.</p>
<p>To examine the effects of directionality bias on anticipatory behavior, a 2 (Directionality Bias Group: leftward or rightward) &#x00D7; 2 (Trial Block: biased block 1 and biased block 2) mixed ANOVA was conducted, with directionality bias direction as a between-subjects factor, and trial block group as a within-subject factor on gaze deviation as the dependent variable for anticipatory behavior. Gaze deviation is defined as the average distance between participants&#x2019; gaze direction and the target&#x2019;s horizontal midline during the first 2.5 s of each trial. Gaze deviations were standardized such that positive values indicated gaze positions ahead of the target&#x2019;s midline in the direction of the eventual target movement, and negative values indicated gaze positions trailing the target&#x2019;s horizontal midline.</p>
<p>To examine the influences of changes in anticipatory behavior on reactive behavior, a 2 (Directionality Bias Group: leftward or rightward) &#x00D7; 2 (Trial Block: biased block 1 and biased block 2) mixed ANOVA was conducted, with directionality bias direction as a between-subjects factor, and trial block group as a within-subject factor for each of the three following reactive behavior dependent variables: first catch-up saccade after target&#x2019;s movement onset, first fixation after the first catch-up saccade, and fixation at reach onset. The first catch-up saccade is identified through an automatic method combining acceleration and velocity thresholds. It was defined as the initial eye movement occurring at least 100 ms after the target&#x2019;s movement onset, with a velocity surpassing 35&#x00B0;/s. The saccade also included adjacent frames where the acceleration surpassed 1,000&#x00B0;/s<sup>2</sup> (<xref ref-type="bibr" rid="B44">Krauzlis and Miles, 1996</xref>). The 100 ms delay in saccade detection was included to ensure that a saccade was initiated after the target&#x2019;s movement onset had become perceptible (<xref ref-type="bibr" rid="B63">Orban de Xivry and Lef&#x00E8;vre, 2007</xref>). The first fixation following the first catch-up saccade and the fixation immediately preceding or co-occurring with the initiation of the reaching movement were determined based on the dispersion-threshold identification (I-DT) algorithm (<xref ref-type="bibr" rid="B67">Salvucci and Goldberg, 2000</xref>). More specifically, fixations were defined as a pause in eye movements for at least 100 ms, with a maximum dispersion threshold of 1 cm. The first fixation following the catch-up saccade was analyzed to examine whether variations in anticipatory behavior, emerging before the onset of target motion, are reflected in the spatial characteristics of subsequent target tracking. 
Specifically, we investigated whether enhanced anticipatory behavior facilitates more precise tracking, as indicated by a fixation positioned closer to or ahead of the target&#x2019;s midline during the reaction phase. Furthermore, we analyzed the fixation occurring immediately before or concurrently with reach onset, as previous research suggests that this fixation reflects the selection of the grasp location (<xref ref-type="bibr" rid="B12">Cavina-Pratesi and Hesse, 2013</xref>).</p>
<p>Given our interest in assessing the influence of directionality bias, we sought to ensure that any initial differences in oculomotor behavior between the Rightward and Leftward bias groups were not the result of random variation due to sampling error. To address this, we conducted an independent samples <italic>t</italic>-test, or, when appropriate, its non-parametric equivalent, on each dependent variable during the no-bias condition, comparing the Rightward and Leftward bias groups. The no-bias condition was not included in the aforementioned mixed ANOVAs because it served solely as a baseline assessment rather than as an experimental condition relevant to our hypotheses.</p>
<p>Furthermore, to examine the influence of directionality bias and potential changes in anticipatory or reactionary oculomotor behavior on movement kinematics, we conducted a paired-samples <italic>t</italic>-test, or, when necessary, its non-parametric equivalent, on each of the following dependent variables: maximum reaching velocity, reach duration, and reach latency. Maximum reaching velocity refers to the peak speed achieved during the reach, reach duration refers to the total time from movement onset to target grasp, and reach latency refers to the time between tone presentation and movement onset. Movement onset was defined as the point at which wrist velocity reached 5 cm/s. These analyses were restricted to within-subject comparisons across the two biased trial blocks. Since right-handed reaching toward a target moving rightward differs biomechanically from reaching toward a target moving leftward, requiring a cross-body movement, we did not compare the Rightward and Leftward Bias groups. Any between-subject differences would likely reflect these inherent biomechanical asymmetries rather than effects attributable to the manipulation in this study and thus would not support meaningful interpretation.</p>
<p>To determine if the amount of anticipatory gaze prior to the target&#x2019;s movement predicted fixation behavior after target movement, linear mixed-effect models (LMMs) using the lme4 package in R (<xref ref-type="bibr" rid="B4">Bates et al., 2015</xref>) were fitted to the (i) amplitude and (ii) duration of the first catch-up saccade occurring after target movement, (iii) first fixation after the first catch-up saccade, and (iv) fixation at reach onset. In addition to anticipatory gaze, trial block (biased block 1 and biased block 2) and directionality bias (left and right) were included as controlled-for fixed effect predictors. Participant number was included as a random intercept to account for within participant variability.</p>
<p>Inspections of the LMM residuals revealed severe violation of the assumptions of normality, large Cook&#x2019;s Distance values indicating a disproportionate influence of several data points on model estimates, and moderately high autocorrelation. To address these violations, the original LMMs were replaced with robust mixed-effects models (RMMs) using the robustlmm package in R (<xref ref-type="bibr" rid="B40">Koller, 2016</xref>). RMMs are specifically designed to address the observed limitations: they are tolerant of non-normal samples, reduce the influence of disproportionately influential data points, and are less sensitive to the presence of autocorrelation. RMMs do not support traditional hypothesis testing or provide <italic>p</italic>-values; therefore, the significance of the anticipatory gaze predictor was assessed using 95% confidence intervals (CIs); the predictor was considered statistically significant if 0 was not contained in its 95% CI.</p>
</sec>
</sec>
<sec id="S3" sec-type="results">
<label>3</label>
<title>Results</title>
<sec id="S3.SS1">
<label>3.1</label>
<title>Excluded data</title>
<p>Experimental data were excluded from analysis if the participant executed the task incorrectly (i.e., initiated the reach prior to the presentation of the tone, failed to grasp the target at its actual on-screen location or failed to grasp the target prior to its movement off the screen) or if data was lost due to equipment failure. A total of 6.10% of trials were excluded due to one of these two reasons.</p>
</sec>
<sec id="S3.SS2">
<label>3.2</label>
<title>Gaze measurement accuracy</title>
<p>The average displacement error for all participants across all blocks was &#x2212;0.03 cm (<italic>SD</italic> = 0.24) in the horizontal axis. Across all three blocks, the average displacement error was 0.08 cm (<italic>SD</italic> = 0.25) for participants in the Leftward bias group, and 0.02 cm (<italic>SD</italic> = 0.21) for participants in the Rightward bias group. The analysis indicated no significant main effect of directionality bias group on average displacement error, <italic>F</italic>(1, 48) = 3.97, <italic>p</italic> = 0.052, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.07; no significant main effect of trial block on average displacement error, <italic>F</italic>(1.61, 77.29) = 1.00, <italic>p</italic> = 0.356, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.02; and no significant interaction between directionality bias group and trial block, <italic>F</italic>(1.61, 77.29) = 1.00, <italic>p</italic> = 0.355, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.02.</p>
<p>The average absolute error combined among all participants, and across all blocks, was 0.32 cm (<italic>SD</italic> = 0.12) in the horizontal axis. Across all three blocks, the average absolute error was 0.33 cm (<italic>SD</italic> = 0.12) for participants in the Leftward bias group, and 0.31 cm (<italic>SD</italic> = 0.12) for participants in the Rightward bias group. Results revealed no significant main effect of directionality bias group on absolute error rate, <italic>F</italic>(1, 48) = 0.30, <italic>p</italic> = 0.582, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.01; no significant main effect of trial block on absolute error rate, <italic>F</italic>(2, 96) = 2.21, <italic>p</italic> = 0.115, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.04; and no significant interaction between directionality bias group and trial block, <italic>F</italic>(2, 96) = 0.49, <italic>p</italic> = 0.615, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.01. Together, these results confirm the average displacement error and average absolute gaze error did not differ between the two experimental groups and did not change between the three blocks of experimental trials.</p>
</sec>
<sec id="S3.SS3">
<label>3.3</label>
<title>Potential initial unmanipulated biases</title>
<p>During the no-bias block of trials, we observed similar average anticipatory gaze behavior between the two groups; the Rightward bias group had mean gaze of 0.08 cm ahead of the target&#x2019;s horizontal midline (<italic>SD</italic> = 0.24) and the Leftward bias group had mean gaze of 0.04 cm ahead of the target&#x2019;s horizontal midline (<italic>SD</italic> = 0.24). Results of the independent <italic>t</italic>-test indicated no significant difference in anticipatory gaze behavior between the two groups, <italic>t</italic>(48) = &#x2212;0.58, <italic>p</italic> = 0.562. This shows that during the non-biased condition, participants&#x2019; anticipatory gaze was directed toward the center of the target prior to its movement onset in both groups (approximately 0.06 cm away from the target&#x2019;s center). These results confirm the lack of inherent differences between the Leftward and Rightward bias groups, prior to their exposure to the biased conditions.</p>
</sec>
<sec id="S3.SS4">
<label>3.4</label>
<title>Anticipatory gaze behavior</title>
<p>Confident in the lack of any inherent directional bias in participants&#x2019; gaze unrelated to our experimental manipulation, we examined the effects of directionality bias on anticipatory gaze behavior. The main effect of Directionality Bias Group was not significant, <italic>F</italic>(1, 48) = 3.43, <italic>p</italic> = 0.070, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.07, indicating that anticipatory gaze behavior was not affected by the direction of the bias. However, there was a significant main effect of Trial Block, <italic>F</italic>(1, 48) = 15.84, <italic>p</italic> &#x003C; 0.001, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.25. Collapsing across the two directionality-biased blocks, participants in biased block 1 displayed a mean gaze deviation of 0.12 cm ahead of the target&#x2019;s horizontal midline (<italic>SD</italic> = 0.68), whereas participants in biased block 2 displayed a mean gaze deviation of 0.44 cm behind the target&#x2019;s horizontal midline (<italic>SD</italic> = 0.71). This indicates that participants adapted their gaze strategy as the experiment progressed, with gaze during later trials being positioned more distant from the center of the target than earlier trials, as illustrated in <xref ref-type="fig" rid="F2">Figure 2</xref>. The interaction between directionality bias direction and trial block was not significant, <italic>F</italic>(1, 48) = 0.00, <italic>p</italic> = 0.890, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.00, suggesting that the change in anticipatory gaze behavior observed as participants completed more biased trials was not differentially impacted by the bias direction.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption><p>Average gaze deviation in biased block 1 and biased block 2, with the data for the between-subjects groups (directionality bias: leftward and rightward) collapsed. Positive values indicate deviation in the direction of future target movement. Error bars represent standard error of the mean. &#x002A;<italic>p</italic> &#x003C; 0.01.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyg-17-1625415-g002.tif">
<alt-text content-type="machine-generated">Bar graph comparing average gaze deviation from the target&#x2019;s midline in centimeters for Biased Block 1 and Biased Block 2. Biased Block 2 shows a higher average and larger error bars. An asterisk indicates a statistically significant difference.</alt-text>
</graphic>
</fig>
</sec>
<sec id="S3.SS5">
<label>3.5</label>
<title>First catch-up saccade</title>
<p>For the amplitude of the first catch-up saccade after the target&#x2019;s movement onset, a 2 &#x00D7; 2 mixed ANOVA yielded a significant effect for Trial Block, <italic>F</italic>(1, 48) = 7.39, <italic>p</italic> = 0.009, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.13, showing that the amplitude of the catch-up saccade shrank from biased block 1 to biased block 2, as illustrated in <xref ref-type="fig" rid="F3">Figure 3</xref>. However, the ANOVA yielded no significance for Directionality Bias Group, <italic>F</italic>(1, 48) = 0.52, <italic>p</italic> = 0.476, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.01, and no significant interaction, <italic>F</italic>(1, 48) = 0.01, <italic>p</italic> = 0.908, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.00. An independent samples <italic>t</italic>-test yielded no significant differences between the two Directionality Bias Groups during the no-bias block, <italic>t</italic>(48) = &#x2212;0.43, <italic>p</italic> = 0.668.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption><p>Catch-up saccade amplitudes during both Trial Blocks, with the data for the between-subjects groups (directionality bias: leftward and rightward) collapsed. Error bars represent standard error of the mean. &#x002A;<italic>p</italic> &#x003C; 0.05.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyg-17-1625415-g003.tif">
<alt-text content-type="machine-generated">Bar graph comparing the amplitude of the first saccade after target&#x2019;s movement onset between Biased Block One and Biased Block Two, showing a significant decrease in amplitude for Block Two, denoted by an asterisk and error bars.</alt-text>
</graphic>
</fig>
<p>Additionally, for the duration of the first catch-up saccade after the target&#x2019;s movement onset, a 2 &#x00D7; 2 mixed ANOVA yielded significance for Trial Block, <italic>F</italic>(1, 48) = 5.77, <italic>p</italic> = 0.020, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.11, revealing shorter saccade duration in biased block 2 (<italic>M</italic> = 28.20 ms, <italic>SD</italic> = 6.23) than in biased block 1 (<italic>M</italic> = 30.11 ms, <italic>SD</italic> = 5.83). However, there was no significant difference for Directionality Bias Group, <italic>F</italic>(1, 48) = 1.15, <italic>p</italic> = 0.288, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.02, and no significant interaction, <italic>F</italic>(1, 48) = 1.65, <italic>p</italic> = 0.205, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.03. An independent samples <italic>t</italic>-test yielded no significant differences between the two Directionality Bias Groups during the no-bias block, <italic>t</italic>(48) = 0.70, <italic>p</italic> = 0.485.</p>
</sec>
<sec id="S3.SS6">
<label>3.6</label>
<title>Fixation after catch-up saccade</title>
<p>For the first fixation after the catch-up saccade, a 2 &#x00D7; 2 mixed ANOVA yielded significance for Directionality Bias Group, <italic>F</italic>(1, 48) = 70.35, <italic>p</italic> &#x003C; 0.001, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.59, and for Trial Block, <italic>F</italic>(1, 48) = 16.85, <italic>p</italic> &#x003C; 0.001, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.26, but no significant interaction, <italic>F</italic>(1, 48) = 0.66, <italic>p</italic> = 0.199, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.03, as illustrated in <xref ref-type="fig" rid="F4">Figures 4</xref>, <xref ref-type="fig" rid="F5">5</xref>, respectively. In biased block 1, the fixation position was ahead of the target&#x2019;s midline by 0.18 cm (<italic>SD</italic> = 0.94) in the Rightward Bias Group but trailed the target&#x2019;s midline by 2.41 cm (<italic>SD</italic> = 1.30) in the Leftward Bias Group. In biased block 2, the fixation position was ahead of the target&#x2019;s midline by 0.53 cm (<italic>SD</italic> = 1.01) in the Rightward Bias Group but trailed the target&#x2019;s midline by 1.74 cm (<italic>SD</italic> = 1.13) in the Leftward Bias Group. Collapsing across the Directionality Bias Groups, the distance between the fixation position and target&#x2019;s midline shrank from biased block 1 to biased block 2. This shrinkage pattern persisted for each Directionality Bias Group, with the Rightward Bias Group showing better target tracking than the Leftward Bias Group. An independent samples <italic>t</italic>-test yielded no significant differences between the two Directionality Bias Groups during the no-bias block, <italic>t</italic>(48) = &#x2212;0.22, <italic>p</italic> = 0.829.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption><p>Fixation deviation between the Leftward Bias and Rightward Bias groups, while collapsing across Trial Block. Y-axis represents the deviation of the first fixation following the catch-up saccade from the target&#x2019;s midline, with negative values indicating fixation trailing the target&#x2019;s midline and positive values indicating fixation ahead of it. Error bars represent standard error of the mean. &#x002A;<italic>p</italic> &#x003C; 0.001.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyg-17-1625415-g004.tif">
<alt-text content-type="machine-generated">Bar graph showing fixation deviation from target&#x2019;s midline in centimeters for leftward and rightward directionality bias groups. Leftward group shows a deviation near negative two with error bars, rightward group shows a positive deviation near zero point five. An asterisk above indicates a statistically significant difference.</alt-text>
</graphic>
</fig>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption><p>Fixation deviation across the two Trial Blocks, collapsing across Directionality Bias group. Y-axis represents the deviation of the first fixation following the catch-up saccade from the target&#x2019;s midline, with negative values indicating fixation trailing the target&#x2019;s midline and positive values indicating fixation ahead of it. Error bars represent standard error of the mean. &#x002A;<italic>p</italic> &#x003C; 0.001.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyg-17-1625415-g005.tif">
<alt-text content-type="machine-generated">Bar graph showing fixation deviation from target&#x2019;s midline in centimeters for two conditions: Biased Block 1 and Biased Block 2. Biased Block 1 has a larger negative deviation and greater error bars than Biased Block 2. An asterisk below the bars indicates a statistically significant difference between the two blocks.</alt-text>
</graphic>
</fig>
</sec>
<sec id="S3.SS7">
<label>3.7</label>
<title>Fixation at reach onset</title>
<p>For the fixation at reach onset, a 2 &#x00D7; 2 mixed ANOVA yielded significance for Trial Block, <italic>F</italic>(1, 48) = 9.18, <italic>p</italic> = 0.004, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.16, showing that despite fixations falling behind the target&#x2019;s midline in both trial blocks, fixations were closer to the target&#x2019;s midline in biased block 2 compared to biased block 1, as illustrated in <xref ref-type="fig" rid="F6">Figure 6</xref>. However, there was no significant effect for Directionality Bias Group, <italic>F</italic>(1, 48) = 0.75, <italic>p</italic> = 0.391, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.02, and no significant interaction, <italic>F</italic>(1, 48) = 2.24, <italic>p</italic> = 0.141, &#x03B7;<sub><italic>p</italic></sub><sup>2</sup> = 0.05. Moreover, an independent samples <italic>t</italic>-test yielded no significant differences between the two Directionality Bias Groups during the no-bias block, <italic>t</italic>(48) = &#x2212;0.50, <italic>p</italic> = 0.621.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption><p>Effect of trial block on fixation deviation at the time of reach onset, collapsing across Directionality Bias group. Y-axis represents the deviation of the fixation from the target&#x2019;s midline, with negative values indicating fixation trailing the midline. Error bars represent standard error of the mean. &#x002A;<italic>p</italic> &#x003C; 0.01.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyg-17-1625415-g006.tif">
<alt-text content-type="machine-generated">Bar graph comparing fixation deviation from target&#x2019;s midline in centimeters for Biased Block 1 and Biased Block 2. Biased Block 1 shows greater negative deviation and larger error bars than Biased Block 2, with a significant difference indicated by an asterisk.</alt-text>
</graphic>
</fig>
</sec>
<sec id="S3.SS8">
<label>3.8</label>
<title>Kinematic variables</title>
<p>For maximum reaching velocity, a Wilcoxon signed-rank test indicated a significant difference between trial blocks, <italic>z</italic> = &#x2212;2.36, <italic>p</italic> = 0.018, with participants exhibiting lower peak reaching velocity in biased block 2, with an average of 83.31 cm/s (<italic>SD</italic> = 31.77) compared to biased block 1, with an average of 86.54 cm/s (<italic>SD</italic> = 33.26).</p>
<p>For reach duration, a Wilcoxon signed-rank test showed no significant difference between biased block 1 (<italic>M</italic> = 0.54, <italic>SD</italic> = 0.09) and biased block 2 (<italic>M</italic> = 0.54, <italic>SD</italic> = 0.09), <italic>z</italic> = &#x2212;0.56, <italic>p</italic> = 0.576.</p>
<p>For reach latency, a paired-samples <italic>t</italic>-test yielded no significant difference between biased block 1 (<italic>M</italic> = 0.97, <italic>SD</italic> = 0.07) and biased block 2 (<italic>M</italic> = 0.96, <italic>SD</italic> = 0.07), <italic>t</italic>(49) = 1.20, <italic>p</italic> = 0.235.</p>
</sec>
<sec id="S3.SS9">
<label>3.9</label>
<title>Robust mixed-effects models</title>
<p>The fixed-effect estimates are provided in <xref ref-type="table" rid="T1">Tables 1A&#x2013;D</xref>.</p>
<table-wrap position="float" id="T1">
<label>TABLE 1</label>
<caption><p>Fixed-effect estimates for (A) first catch-up saccade amplitude, (B) first catch-up saccade duration, (C) first fixation after catch-up saccade, and (D) fixation at reach onset deviation.</p></caption>
<table cellspacing="5" cellpadding="5" frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left">Fixed effect</th>
<th valign="top" align="center">Estimate</th>
<th valign="top" align="center">Std. error</th>
<th valign="top" align="center">Lower 95% CI</th>
<th valign="top" align="center">Higher 95% CI</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" colspan="5"><bold>(A) First catch-up saccade amplitude</bold></td>
</tr>
<tr>
<td valign="top" align="left">Intercept</td>
<td valign="top" align="center">1.83</td>
<td valign="top" align="center">0.13</td>
<td valign="top" align="center">1.58</td>
<td valign="top" align="center">2.09</td>
</tr>
<tr>
<td valign="top" align="left">Anticipatory gaze</td>
<td valign="top" align="center">&#x2212;0.20</td>
<td valign="top" align="center">0.10</td>
<td valign="top" align="center">&#x2212;0.39</td>
<td valign="top" align="center">&#x2212;0.01</td>
</tr>
<tr>
<td valign="top" align="left">Biased block</td>
<td valign="top" align="center">&#x2212;0.18</td>
<td valign="top" align="center">0.08</td>
<td valign="top" align="center">&#x2212;0.34</td>
<td valign="top" align="center">&#x2212;0.02</td>
</tr>
<tr>
<td valign="top" align="left">Directionality bias</td>
<td valign="top" align="center">0.17</td>
<td valign="top" align="center">0.17</td>
<td valign="top" align="center">&#x2212;0.17</td>
<td valign="top" align="center">0.51</td>
</tr>
<tr>
<td valign="top" align="left" colspan="5"><bold>(B) First catch-up saccade duration</bold></td>
</tr>
<tr>
<td valign="top" align="left">Intercept</td>
<td valign="top" align="center">28.88</td>
<td valign="top" align="center">1.24</td>
<td valign="top" align="center">26.44</td>
<td valign="top" align="center">31.32</td>
</tr>
<tr>
<td valign="top" align="left">Anticipatory gaze</td>
<td valign="top" align="center">&#x2212;1.02</td>
<td valign="top" align="center">1.00</td>
<td valign="top" align="center">&#x2212;2.98</td>
<td valign="top" align="center">0.93</td>
</tr>
<tr>
<td valign="top" align="left">Biased block</td>
<td valign="top" align="center">&#x2212;1.49</td>
<td valign="top" align="center">0.89</td>
<td valign="top" align="center">&#x2212;3.24</td>
<td valign="top" align="center">0.27</td>
</tr>
<tr>
<td valign="top" align="left">Directionality bias</td>
<td valign="top" align="center">1.81</td>
<td valign="top" align="center">1.63</td>
<td valign="top" align="center">&#x2212;1.38</td>
<td valign="top" align="center">5.00</td>
</tr>
<tr>
<td valign="top" align="left" colspan="5"><bold>(C) First fixation after catch-up saccade</bold></td>
</tr>
<tr>
<td valign="top" align="left">Intercept</td>
<td valign="top" align="center">0.17</td>
<td valign="top" align="center">0.20</td>
<td valign="top" align="center">&#x2212;0.23</td>
<td valign="top" align="center">0.56</td>
</tr>
<tr>
<td valign="top" align="left">Anticipatory gaze</td>
<td valign="top" align="center">0.27</td>
<td valign="top" align="center">0.13</td>
<td valign="top" align="center">0.01</td>
<td valign="top" align="center">0.53</td>
</tr>
<tr>
<td valign="top" align="left">Biased block</td>
<td valign="top" align="center">0.37</td>
<td valign="top" align="center">0.10</td>
<td valign="top" align="center">0.18</td>
<td valign="top" align="center">0.57</td>
</tr>
<tr>
<td valign="top" align="left">Directionality bias</td>
<td valign="top" align="center">&#x2212;2.47</td>
<td valign="top" align="center">0.27</td>
<td valign="top" align="center">&#x2212;3.00</td>
<td valign="top" align="center">&#x2212;1.93</td>
</tr>
<tr>
<td valign="top" align="left" colspan="5"><bold>(D) Fixation at reach onset deviation</bold></td>
</tr>
<tr>
<td valign="top" align="left">Intercept</td>
<td valign="top" align="center">&#x2212;1.60</td>
<td valign="top" align="center">0.33</td>
<td valign="top" align="center">&#x2212;2.25</td>
<td valign="top" align="center">&#x2212;0.94</td>
</tr>
<tr>
<td valign="top" align="left">Anticipatory gaze</td>
<td valign="top" align="center">0.12</td>
<td valign="top" align="center">0.14</td>
<td valign="top" align="center">&#x2212;0.16</td>
<td valign="top" align="center">0.40</td>
</tr>
<tr>
<td valign="top" align="left">Biased block</td>
<td valign="top" align="center">0.25</td>
<td valign="top" align="center">0.10</td>
<td valign="top" align="center">0.06</td>
<td valign="top" align="center">0.44</td>
</tr>
<tr>
<td valign="top" align="left">Directionality bias</td>
<td valign="top" align="center">0.43</td>
<td valign="top" align="center">0.47</td>
<td valign="top" align="center">&#x2212;0.48</td>
<td valign="top" align="center">1.35</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn><p>Estimates represent unstandardized coefficients from robust linear mixed models with Participant included as a random intercept. Predictors whose 95% confidence intervals do not contain 0 are interpreted as statistically significant. For the categorical predictors Biased Block and Directionality Bias, the first of the two biased blocks and the rightward directionality bias served as reference levels, respectively.</p></fn>
</table-wrap-foot>
</table-wrap>
<sec id="S3.SS9.SSS1">
<label>3.9.1</label>
<title>First catch-up saccade amplitude</title>
<p>95% CIs for the anticipatory gaze predictor did not include 0, and therefore anticipatory gaze was determined to be a significantly negative predictor of first catch-up saccade amplitude. A larger amount of anticipatory gaze prior to the movement onset of the target significantly predicted smaller catch-up saccade amplitude (<xref ref-type="table" rid="T1">Table 1A</xref>).</p>
</sec>
<sec id="S3.SS9.SSS2">
<label>3.9.2</label>
<title>First catch-up saccade duration</title>
<p>95% CIs for the anticipatory gaze predictor contained 0, and therefore anticipatory gaze was determined non-significant (<xref ref-type="table" rid="T1">Table 1B</xref>).</p>
</sec>
<sec id="S3.SS9.SSS3">
<label>3.9.3</label>
<title>First fixation after the first catch-up saccade</title>
<p>95% CIs for the anticipatory gaze predictor did not include 0, and therefore anticipatory gaze was determined to be a significantly positive predictor of the deviation from first fixation after catch-up saccade to target midline. A larger amount of anticipatory gaze prior to the movement onset of the target significantly predicted a larger deviation between the fixation location and target midline (<xref ref-type="table" rid="T1">Table 1C</xref>). In other words, the more a participant engaged in anticipatory behavior prior to target movement, the more their first fixation after the catch-up saccade was ahead of the target&#x2019;s midline, suggesting better tracking performance.</p>
</sec>
<sec id="S3.SS9.SSS4">
<label>3.9.4</label>
<title>Fixation at reach onset</title>
<p>95% CIs for the anticipatory gaze predictor contained 0, and therefore anticipatory gaze was determined non-significant (<xref ref-type="table" rid="T1">Table 1D</xref>).</p>
</sec>
</sec>
</sec>
<sec id="S4" sec-type="discussion">
<label>4</label>
<title>Discussion</title>
<p>The present study aimed to investigate the effects of directionality bias on anticipatory and reactive gaze behavior during a reaching-and-grasping task where the only indication regarding direction of future target movement was movement history. Participants were presented with a target with a manipulated directionality bias, causing it to move predominantly either leftward or rightward, depending on the experimental condition. Our findings indicate that participants&#x2019; anticipatory gaze behavior is sensitive to the directionality bias of a moving target, supporting our hypothesis that under uncertain conditions, the visuomotor system can utilize movement history to predict future target positions during the execution of goal-directed behavior. We observed a significant increase in anticipatory gaze deviation in the second block of biased trials compared to the first block, indicating that participants effectively adapted to the directionality bias of the target over time. Unlike previous studies that focused on trajectory identification in action tasks (<xref ref-type="bibr" rid="B10">Bulloch et al., 2015</xref>; <xref ref-type="bibr" rid="B18">Diaz et al., 2013</xref>; <xref ref-type="bibr" rid="B31">Hayhoe et al., 2012</xref>; <xref ref-type="bibr" rid="B76">Thulasiram et al., 2020</xref>) or directionality bias utilization in perceptual tasks (<xref ref-type="bibr" rid="B16">Damasse et al., 2018</xref>; <xref ref-type="bibr" rid="B57">Montagnini et al., 2010</xref>; <xref ref-type="bibr" rid="B60">Notaro et al., 2019</xref>; <xref ref-type="bibr" rid="B68">Santos and Kowler, 2017</xref>), our research underscores the visuomotor system&#x2019;s capacity to mitigate uncertainty by leveraging learned statistical patterns to successfully execute goal-directed actions when the eventual direction of target movement is unknown.</p>
<p>Furthermore, in this study, we examined the influence of anticipatory behavior on subsequent tracking behavior. Results revealed a reduction in the amplitude of catch-up saccades during biased block 2 relative to biased block 1. Notwithstanding this decrease in saccade amplitude, both the fixation immediately following the catch-up saccade and the fixation at reach onset were positioned closer to, or ahead of, the target&#x2019;s midline in biased block 2 compared to biased block 1, reflecting improved tracking performance.</p>
<p>To directly assess the role of anticipatory behavior in shaping reactive oculomotor responses, we ran robust mixed-effects analyses, which revealed that anticipatory gaze significantly predicted both the amplitude of the first catch-up saccade and the spatial position of the first fixation following that saccade, providing converging evidence that early predictive behavior facilitated later oculomotor control (<xref ref-type="bibr" rid="B71">Sim&#x00F3; et al., 2005</xref>). The reduction in the distance between fixation position following the first catch-up saccade and the target&#x2019;s midline across Biased Blocks is consistent with increasingly accurate anticipatory gaze placement prior to target movement onset. When gaze is positioned closer to the expected future location of the target, less corrective adjustment is required once the target begins moving, resulting in both smaller amplitude of the catch-up saccades and fixation locations that are more closely aligned with the target&#x2019;s trajectory.</p>
<p>This interpretation aligns with prior work demonstrating that anticipatory eye movements reduce subsequent position error and improve tracking efficiency by minimizing the need for reactive correction following motion onset (<xref ref-type="bibr" rid="B43">Kowler, 1989</xref>; <xref ref-type="bibr" rid="B2">Barnes and Asselman, 1991</xref>; <xref ref-type="bibr" rid="B18">Diaz et al., 2013</xref>). Taken together, the reduction in fixation deviation from the target&#x2019;s midline following the first catch-up saccade and the correlation between anticipatory gaze and fixation position following the catch-up saccade suggest that improvements in reactive tracking were directly shaped by where gaze was positioned prior to target motion. As participants increasingly directed their gaze toward the anticipated future path of the target, reactionary oculomotor tracking behavior improved, requiring fewer online corrective adjustments. Finally, although anticipatory gaze did not significantly predict fixation at reach onset, fixation placement at reach onset nevertheless improved across biased blocks, suggesting that anticipatory strategies informed subsequent grasp planning even if not in a direct linear manner.</p>
<p>In addition to changes in oculomotor behavior, we observed changes in the kinematic profile of the reaching movement as participants completed more biased trials. More specifically, maximum reaching velocity significantly decreased from biased block 1 to biased block 2, with no significant differences observed for reach latency or reach duration. A reduction in peak velocity, in the absence of changes in movement onset timing or overall movement duration, is commonly interpreted as reflecting a reduced reliance on rapid online corrective adjustments during movement execution rather than a slowing of action planning or a more cautious movement strategy (<xref ref-type="bibr" rid="B21">Elliott et al., 2001</xref>; <xref ref-type="bibr" rid="B69">Saunders and Knill, 2004</xref>).</p>
<p>In the context of the present task, this pattern is consistent with the interpretation that improved anticipatory gaze placement reduced spatial uncertainty once the target began moving, allowing the reach to be guided more smoothly. Notably, the absence of significant changes in reach latency suggests that anticipatory gaze did not simply facilitate earlier movement initiation, but instead influenced how the movement was controlled following onset.</p>
<p>This dissociation aligns with previous work demonstrating that predictive visual information primarily affects movement execution and online control, rather than movement initiation (<xref ref-type="bibr" rid="B69">Saunders and Knill, 2004</xref>), supporting the view that learned directionality bias contributes to more efficient goal-directed reaching by reducing reliance on online corrective control. The changes in the kinematic profile of the reaching movement also support the claim made by <xref ref-type="bibr" rid="B10">Bulloch et al. (2015)</xref> positing that anticipating the direction of movement of the target by looking ahead in its eventual path eliminates the &#x201C;cognitive effort&#x201D; that must be exerted to catch up to and continue following a target after its movement onset.</p>
<p>This study demonstrates the functional relationship between learned environmental statistics and visuomotor control. Despite the lack of any cues about a target&#x2019;s eventual movement direction, except its movement history, participants exhibited progressively stronger anticipatory behavior across biased blocks, with this anticipatory behavior being a significant predictor of aspects of subsequent tracking behavior. This suggests that the visuomotor system made active use of implicitly acquired probabilistic knowledge to facilitate goal-directed action. This aligns with the idea that dorsal-stream control of action can draw upon representations encoded within ventral memory systems when circumstances demand predictive processing (<xref ref-type="bibr" rid="B27">Goodale, 2011</xref>; <xref ref-type="bibr" rid="B53">Milner and Goodale, 2006</xref>; <xref ref-type="bibr" rid="B55">Monaco et al., 2019</xref>). Our findings therefore extend previous research by demonstrating that predictive integration of movement history is expressed not merely in perceptual decisions (<xref ref-type="bibr" rid="B5">Betti et al., 2022</xref>; <xref ref-type="bibr" rid="B60">Notaro et al., 2019</xref>), but also in visuomotor contexts, thereby increasing one&#x2019;s chances of successfully completing the goal-directed behavior.</p>
<p>The observed pattern of reactive gaze behavior further supports the role of prediction in guiding target tracking. Our robust mixed-effects analyses revealed that anticipatory gaze significantly predicted both the amplitude of the first catch-up saccade and the spatial position of the subsequent fixation. In other words, when participants exhibited stronger anticipatory behavior prior to target movement onset, they required smaller corrective saccades and achieved more optimal fixation placement once the target began moving. This pattern aligns with claims made by our lab, <xref ref-type="bibr" rid="B10">Bulloch et al. (2015)</xref> and <xref ref-type="bibr" rid="B76">Thulasiram et al. (2020)</xref>, suggesting that looking ahead reduces the &#x201C;effort&#x201D; required to catch up to a moving stimulus.</p>
<p>Critically, our findings demonstrate that this predictive efficiency emerged in the absence of any explicit cues; participants inferred the likely direction of the target based solely on its movement history. This reflects implicit learning, in which environmental regularities are extracted and used to guide behavior (<xref ref-type="bibr" rid="B23">Fiser and Aslin, 2002</xref>; <xref ref-type="bibr" rid="B78">Turk-Browne, 2012</xref>). Importantly, related work indicates that environmental statistics can be learned and expressed through multiple behavioral channels without necessitating a direct correspondence between them. For instance, <xref ref-type="bibr" rid="B65">Pasturel et al. (2020)</xref> demonstrated that observers form internal representations of probabilistic motion patterns that influence both anticipatory eye movements and explicit judgments about target direction. However, these two behavioral expressions were not strongly correlated across individuals. This dissociation suggests that learned statistical patterns can be utilized flexibly to guide behavior in a task-dependent manner, without requiring that such information be uniformly accessible or expressed across behavioral domains. Within this framework, the present findings extend prior work by demonstrating that implicitly acquired movement statistics are sufficient to support predictive visuomotor behavior, which in turn shapes reactive behavior during the execution of goal-directed actions.</p>
<p>The distinction between early tracking behaviors, such as the first catch-up saccade after target movement onset and the fixation that follows it, and later tracking behavior, such as the fixation at reach onset, further clarifies how prediction shapes visuomotor behavior. Although anticipatory gaze significantly predicted early tracking behaviors, its influence on fixation at reach onset was less direct. Since the fixation at onset of the reaching movement reflects the selection of a grasp location (<xref ref-type="bibr" rid="B12">Cavina-Pratesi and Hesse, 2013</xref>), this suggests that learned environmental statistics is more straightforwardly applied during the initial phase of target pursuit, whereas grasp-related gaze control incorporates both learned expectations and online visual feedback. In other words, prediction appears to play a clearer role in guiding tracking immediately after movement onset, while the fixation associated with grasping reflects a more integrated process, combining what has been learned with what is currently being seen.</p>
<p>The observed capacity to anticipate a target&#x2019;s movements without explicit cues aligns with the notion that the brain employs probabilistic models to manage sensory and motor uncertainty, with the model centering around minimizing error and maximizing reward (<xref ref-type="bibr" rid="B15">Cox, 1946</xref>; <xref ref-type="bibr" rid="B41">K&#x00F6;rding and Wolpert, 2004</xref>, <xref ref-type="bibr" rid="B42">2006</xref>; <xref ref-type="bibr" rid="B78">Turk-Browne, 2012</xref>). For instance, studies have shown that during sensorimotor learning, the central nervous system employs Bayesian strategies to integrate sensory information with prior distributions of task variables, thus improving motor control under uncertain conditions (<xref ref-type="bibr" rid="B41">K&#x00F6;rding and Wolpert, 2004</xref>). Another study demonstrated that the motor response to uncertain visual stimuli is influenced by the brain&#x2019;s probabilistic estimation of sensory states, further highlighting the reliance on Bayesian inference in visuomotor control (<xref ref-type="bibr" rid="B34">Izawa and Shadmehr, 2008</xref>). What is more impressive than the brain&#x2019;s ability to adopt complex probabilistic mechanisms to minimize error and maximize reward is its ability to do so implicitly, without one&#x2019;s awareness and perhaps prior to the manifestation of behavior indicating that statistical learning has occurred (<xref ref-type="bibr" rid="B54">Miyashita, 1993</xref>; <xref ref-type="bibr" rid="B34">Izawa and Shadmehr, 2008</xref>; <xref ref-type="bibr" rid="B78">Turk-Browne, 2012</xref>). Therefore, it is important to discuss the possible neural mechanisms that give humans this ability to implicitly engage in statistical learning and utilize that knowledge to guide behavior in an efficient manner. While this study did not specifically explore the neural correlates of statistical learning and anticipation in uncertain visuomotor contexts, our findings offer substantial insights into the neural mechanisms underpinning the observed behaviors.</p>
<p>From the perspective of the two-visual-stream hypothesis, the ability to produce anticipatory gaze behavior in uncertain visuomotor contexts can be understood as a result of the interactions between the visually guided action (dorsal) and perceptual (ventral) streams (<xref ref-type="bibr" rid="B29">Goodale and Milner, 1992</xref>; <xref ref-type="bibr" rid="B53">Milner and Goodale, 2006</xref>). Although traditionally characterized as processing distinct forms of information, substantial evidence indicates robust interactions between the streams (<xref ref-type="bibr" rid="B9">Budisavljevic et al., 2018</xref>) during goal-directed behavior, whether acting on three-dimensional objects (<xref ref-type="bibr" rid="B36">Janssen et al., 2018</xref>; <xref ref-type="bibr" rid="B79">Van Dromme et al., 2016</xref>) or on two-dimensional objects (<xref ref-type="bibr" rid="B24">Freud et al., 2018</xref>). Such interactions likely support the utilization of a target&#x2019;s movement history to anticipate its future movement direction, with ventral-stream processing contributing contextual and historical knowledge of learned directional bias that can enhance dorsal-stream control of visually guided actions (<xref ref-type="bibr" rid="B24">Freud et al., 2018</xref>; <xref ref-type="bibr" rid="B19">Donato et al., 2020</xref>).</p>
<p>At the same time, emerging evidence challenges the long-standing assumption that the dorsal stream operates exclusively in a feedforward, memory-independent manner, with behavioral priming (<xref ref-type="bibr" rid="B37">Jax and Rosenbaum, 2007</xref>, <xref ref-type="bibr" rid="B38">2009</xref>), processing of generalizable physical properties outside of motor planning contexts (<xref ref-type="bibr" rid="B8">Buckingham et al., 2018</xref>; <xref ref-type="bibr" rid="B70">Schwettmann et al., 2019</xref>), and even contributions to recognition (<xref ref-type="bibr" rid="B26">Goldstein-Marcusohn et al., 2024</xref>) indicating the dorsal stream&#x2019;s capacity to retain and apply information from prior experiences. This evolving view is further supported by evidence that cerebellar regions maintain internal sensory-motor models that enable prediction of the consequences of one&#x2019;s own actions, contributing critically to timing, coordination, and anticipatory scaling during grasping (<xref ref-type="bibr" rid="B61">Nowak et al., 2021</xref>). Such predictive mechanisms align with the neural architecture proposed by <xref ref-type="bibr" rid="B45">Kravitz et al. (2011)</xref>, in which a parieto-prefrontal pathway links dorsal-stream visuomotor control to regions implicated in top-down oculomotor control (<xref ref-type="bibr" rid="B33">Hung et al., 2011</xref>) and spatial working memory (<xref ref-type="bibr" rid="B25">Goldman-Rakic, 1990</xref>). Altogether, these findings provide a plausible neural account for the anticipatory gaze behavior observed in our study, suggesting that learned statistical information derived from movement history can be applied implicitly to guide goal-directed behavior, whether through crosstalk between the ventral and dorsal streams or through intrinsic dorsal-cerebellar mechanisms capable of incorporating prior experience.</p>
<p>Although reaching to grasp a three-dimensional object more closely reflects everyday action, the use of two-dimensional targets, such as the target employed in this study, offers strong methodological advantages. Work from our lab, as well as other research groups, has used 2D targets to explore visual perception and action. Using two-dimensional simulations allows for more rigorous manipulation of various characteristics of the target and its environment, such as shape, size, speed, and direction of movement, enabling researchers to draw more precise and systematic conclusions. While it is important to recognize the differences between reaching to grasp a 3D object and reaching to grasp a 2D target, recent research studies show that certain behavioral features such as gaze behavior are similar when reaching-to-grasp a 2D object and a 3D object (<xref ref-type="bibr" rid="B10">Bulloch et al., 2015</xref>; <xref ref-type="bibr" rid="B47">Langridge and Marotta, 2017</xref>, <xref ref-type="bibr" rid="B49">2021</xref>; <xref ref-type="bibr" rid="B76">Thulasiram et al., 2020</xref>). As such, given that our primary aim was to examine how directionality bias influences goal-directed visuomotor behavior rather than to reproduce the full mechanical demands of grasping real objects, the use of 2D targets provides a valid and effective means of isolating and characterizing the anticipatory and tracking behaviors of interest.</p>
<p>The present design allowed for precise control over target motion and directionality bias; however, a few limitations should be acknowledged. First, the use of a two-dimensional target, while advantageous for isolating the effects of movement history on anticipatory and reactive behavior, necessarily simplifies the perceptual and mechanical demands associated with naturalistic reaching and grasping. As a result, the extent to which the observed anticipatory gaze strategies and associated kinematic adaptations generalize to interactions with fully three-dimensional objects remains an open question. In addition, although the sample size was sufficient to detect moderate effects, the present study may have been underpowered to detect smaller effects, which may partly account for some non-significant findings and warrant caution when interpreting null results. Finally, the task involved highly constrained and predictable target trajectories, which may have facilitated the detection of implicit statistical regularities. In more complex environments, where target motion could be more variable, the ability to exploit movement history may be reduced or expressed differently. Therefore, the extent to which these findings generalize to less constrained task environments remains to be determined.</p>
<p>Although the present study was conducted in a controlled laboratory setting, the findings have broader implications for understanding how predictive visuomotor strategies support performance in dynamic, uncertain environments. The ability to exploit movement history to guide anticipatory gaze and reduce reliance on online corrective control is central to many real-world tasks, such as sports, tool use, and other activities that require the interception of moving objects. In such contexts, efficient gaze allocation can facilitate smoother movement execution and reduce the computational demands associated with rapid corrective adjustments. More broadly, these findings may inform the design of human-machine interfaces and virtual training environments, including those used for supervising or interacting with autonomous systems such as drones, by highlighting the importance of predictable motion statistics for supporting anticipatory control. While the current study does not directly assess performance outcomes in applied settings, it provides a behavioral foundation for understanding how learned environmental statistics can be integrated into goal-directed action to improve efficiency under uncertainty.</p>
<p>Future research is needed to characterize the parameters under which directionality bias can be exploited to guide goal-directed behavior. Although the present study demonstrates that learned statistical regularities of a target&#x2019;s movement influence both anticipatory gaze and subsequent tracking, future work should explore the limits of this capacity. For example, manipulating factors such as the bias ratio, the degree of trial-to-trial uncertainty, or the level of task complexity could clarify the conditions under which movement history can be utilized and integrated into action planning. In addition, future work could examine the temporal evolution of anticipatory gaze within the initial stationary period, prior to the target&#x2019;s movement onset, which may provide further insight into how anticipatory oculomotor behavior emerges and stabilizes prior to target movement onset. Moreover, given that anticipatory gaze reflects the allocation of visual attention (<xref ref-type="bibr" rid="B3">Bastiaansen and Brunia, 2001</xref>), an important extension of this study would be to examine how the use of directionality bias differs when visual information is available in foveal vs. peripheral vision. These manipulations would clarify whether the advantages of anticipatory gaze behavior demonstrated in this study generalize beyond foveal tracking and reflect a broader mechanism for integrating learned statistical information into goal-directed action. Finally, it will be valuable to determine whether the effects demonstrated in this study generalize to conditions in which detecting and applying statistical regularities is even more consequential, including curved target trajectories, temporary occlusions, delayed movement onset, or risk-reward scenarios in which accurate interception is necessary.</p>
</sec>
<sec id="S5" sec-type="conclusion">
<label>5</label>
<title>Conclusion</title>
<p>While considerable research has focused on the visuomotor system&#x2019;s ability to produce anticipatory behavior in uncertain perceptual contexts, our study aimed to explore this capacity within the context of goal-directed actions. We tasked participants with reaching toward and grasping a moving target that exhibited a manipulated directionality bias. This design allowed us to examine their ability to engage in statistical learning to exploit the bias and produce anticipatory behavior in response, relying solely on the target&#x2019;s movement history and without any additional cues. Our results show that the visuomotor system can implicitly exploit the directionality bias of a target and produce anticipatory gaze behavior when reaching to grasp that target. Furthermore, the results showed that anticipatory behavior did not occur in isolation; instead, modeling revealed that anticipatory behavior led to meaningful downstream consequences. Stronger anticipatory gaze predicted more efficient reactive tracking, characterized by reduced corrective saccade amplitude and improved fixation placement while tracking the target. Anticipatory gaze was also accompanied by changes in movement kinematics, suggesting that implicit statistical learning not only guides early visual anticipation but also facilitates the efficiency of the resulting reaching movement. Together, these patterns indicate that learned statistical or directional bias is functionally integrated into visuomotor control, shaping both eye movement planning and reaching movement. Future studies should also focus on determining the conditions under which a directionality bias can be utilized, as well as the minimum threshold of bias required for utilization. Notably, it is important to ascertain the extent to which the bias ratio can deviate from one before it becomes unusable.</p>
</sec>
</body>
<back>
<sec id="S6" sec-type="data-availability">
<title>Data availability statement</title>
<p>The datasets presented in this study can be found in online repositories. The names of the repository/repositories and accession number(s) can be found at: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.5683/SP3/TITXEK">https://doi.org/10.5683/SP3/TITXEK</ext-link>.</p>
</sec>
<sec id="S7" sec-type="ethics-statement">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the Research Ethics Board at the University of Manitoba, Fort Garry. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec id="S8" sec-type="author-contributions">
<title>Author contributions</title>
<p>YE: Conceptualization, Data curation, Formal analysis, Funding acquisition, Investigation, Methodology, Project administration, Software, Supervision, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. RL: Methodology, Software, Validation, Writing &#x2013; review &#x0026; editing. JM: Conceptualization, Funding acquisition, Methodology, Resources, Supervision, Validation, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<ack>
<title>Acknowledgments</title>
<p>We extend our deepest gratitude to all participants involved in this study. Furthermore, we would like to thank Alexis Chu for his assistance with data collection.</p>
</ack>
<sec id="S10" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="S11" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec id="S12" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Aglioti</surname> <given-names>S. M.</given-names></name> <name><surname>Cesari</surname> <given-names>P.</given-names></name> <name><surname>Romani</surname> <given-names>M.</given-names></name> <name><surname>Urgesi</surname> <given-names>C.</given-names></name></person-group> (<year>2008</year>). <article-title>Action anticipation and motor resonance in elite basketball players.</article-title> <source><italic>Nat. Neurosci.</italic></source> <volume>11</volume> <fpage>1109</fpage>&#x2013;<lpage>1116</lpage>. <pub-id pub-id-type="doi">10.1038/nn.2182</pub-id> <pub-id pub-id-type="pmid">19160510</pub-id></mixed-citation></ref>
<ref id="B2"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Barnes</surname> <given-names>G. R.</given-names></name> <name><surname>Asselman</surname> <given-names>P. T.</given-names></name></person-group> (<year>1991</year>). <article-title>The mechanism of prediction in human smooth pursuit eye movements.</article-title> <source><italic>J. Physiol.</italic></source> <volume>439</volume> <fpage>439</fpage>&#x2013;<lpage>461</lpage>. <pub-id pub-id-type="doi">10.1113/jphysiol.1991.sp018675</pub-id> <pub-id pub-id-type="pmid">1895243</pub-id></mixed-citation></ref>
<ref id="B3"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bastiaansen</surname> <given-names>M. C. M.</given-names></name> <name><surname>Brunia</surname> <given-names>C. H. M.</given-names></name></person-group> (<year>2001</year>). <article-title>Anticipatory attention: an event-related desynchronization approach.</article-title> <source><italic>Int. J. Psychophysiol.</italic></source> <volume>43</volume> <fpage>91</fpage>&#x2013;<lpage>107</lpage>. <pub-id pub-id-type="doi">10.1016/S0167-8760(01)00181-7</pub-id> <pub-id pub-id-type="pmid">11742687</pub-id></mixed-citation></ref>
<ref id="B4"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bates</surname> <given-names>D.</given-names></name> <name><surname>M&#x00E4;chler</surname> <given-names>M.</given-names></name> <name><surname>Bolker</surname> <given-names>B.</given-names></name> <name><surname>Walker</surname> <given-names>S.</given-names></name></person-group> (<year>2015</year>). <article-title>Fitting linear mixed-effects models using lme4.</article-title> <source><italic>J. Stat. Softw.</italic></source> <volume>67</volume> <fpage>1</fpage>&#x2013;<lpage>48</lpage>. <pub-id pub-id-type="doi">10.18637/jss.v067.i01</pub-id></mixed-citation></ref>
<ref id="B5"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Betti</surname> <given-names>S.</given-names></name> <name><surname>Finisguerra</surname> <given-names>A.</given-names></name> <name><surname>Amoruso</surname> <given-names>L.</given-names></name> <name><surname>Urgesi</surname> <given-names>C.</given-names></name></person-group> (<year>2022</year>). <article-title>Contextual priors guide perception and motor responses to observed actions.</article-title> <source><italic>Cereb. Cortex</italic></source> <volume>32</volume> <fpage>608</fpage>&#x2013;<lpage>625</lpage>. <pub-id pub-id-type="doi">10.1093/cercor/bhab241</pub-id> <pub-id pub-id-type="pmid">34297809</pub-id></mixed-citation></ref>
<ref id="B6"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bowers</surname> <given-names>D.</given-names></name> <name><surname>Heilman</surname> <given-names>K. M.</given-names></name></person-group> (<year>1980</year>). <article-title>Pseudoneglect: effects of hemispace on a tactile line bisection task.</article-title> <source><italic>Neuropsychologia</italic></source> <volume>18</volume> <fpage>491</fpage>&#x2013;<lpage>498</lpage>. <pub-id pub-id-type="doi">10.1016/0028-3932(80)90151-7</pub-id> <pub-id pub-id-type="pmid">6777712</pub-id></mixed-citation></ref>
<ref id="B7"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Brandi</surname> <given-names>M.-L.</given-names></name> <name><surname>Wohlschl&#x00E4;ger</surname> <given-names>A.</given-names></name> <name><surname>Sorg</surname> <given-names>C.</given-names></name> <name><surname>Hermsd&#x00F6;rfer</surname> <given-names>J.</given-names></name></person-group> (<year>2014</year>). <article-title>The neural correlates of planning and executing actual tool use.</article-title> <source><italic>J. Neurosci.</italic></source> <volume>34</volume> <fpage>13183</fpage>&#x2013;<lpage>13194</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.0597-14.2014</pub-id> <pub-id pub-id-type="pmid">25253863</pub-id></mixed-citation></ref>
<ref id="B8"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Buckingham</surname> <given-names>G.</given-names></name> <name><surname>Holler</surname> <given-names>D.</given-names></name> <name><surname>Michelakakis</surname> <given-names>E. E.</given-names></name> <name><surname>Snow</surname> <given-names>J. C.</given-names></name></person-group> (<year>2018</year>). <article-title>Preserved object weight processing after bilateral lateral occipital complex lesions.</article-title> <source><italic>J. Cogn. Neurosci.</italic></source> <volume>30</volume> <fpage>1683</fpage>&#x2013;<lpage>1690</lpage>. <pub-id pub-id-type="doi">10.1162/jocn_a_01314</pub-id> <pub-id pub-id-type="pmid">30024326</pub-id></mixed-citation></ref>
<ref id="B9"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Budisavljevic</surname> <given-names>S.</given-names></name> <name><surname>Dell&#x2019;Acqua</surname> <given-names>F.</given-names></name> <name><surname>Castiello</surname> <given-names>U.</given-names></name></person-group> (<year>2018</year>). <article-title>Cross-talk connections underlying dorsal and ventral stream integration during hand actions.</article-title> <source><italic>Cortex</italic></source> <volume>103</volume> <fpage>224</fpage>&#x2013;<lpage>239</lpage>. <pub-id pub-id-type="doi">10.1016/j.cortex.2018.02.016</pub-id> <pub-id pub-id-type="pmid">29660652</pub-id></mixed-citation></ref>
<ref id="B10"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bulloch</surname> <given-names>M. C.</given-names></name> <name><surname>Prime</surname> <given-names>S. L.</given-names></name> <name><surname>Marotta</surname> <given-names>J. J.</given-names></name></person-group> (<year>2015</year>). <article-title>Anticipatory gaze strategies when grasping moving objects.</article-title> <source><italic>Exp. Brain Res.</italic></source> <volume>233</volume> <fpage>3413</fpage>&#x2013;<lpage>3423</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-015-4413-7</pub-id> <pub-id pub-id-type="pmid">26289482</pub-id></mixed-citation></ref>
<ref id="B11"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Carnahan</surname> <given-names>H.</given-names></name> <name><surname>Goodale</surname> <given-names>M.</given-names></name> <name><surname>Marteniuk</surname> <given-names>R.</given-names></name></person-group> (<year>1993</year>). <article-title>Grasping versus pointing and the differential use of visual feedback.</article-title> <source><italic>Hum. Mov. Sci.</italic></source> <volume>12</volume> <fpage>219</fpage>&#x2013;<lpage>234</lpage>. <pub-id pub-id-type="doi">10.1016/0167-9457(93)90016-I</pub-id></mixed-citation></ref>
<ref id="B12"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cavina-Pratesi</surname> <given-names>C.</given-names></name> <name><surname>Hesse</surname> <given-names>C.</given-names></name></person-group> (<year>2013</year>). <article-title>Why do the eyes prefer the index finger? Simultaneous recording of eye and hand movements during precision grasping.</article-title> <source><italic>J. Vis.</italic></source> <volume>13</volume>:<fpage>15</fpage>. <pub-id pub-id-type="doi">10.1167/13.5.15</pub-id> <pub-id pub-id-type="pmid">23599419</pub-id></mixed-citation></ref>
<ref id="B13"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chokron</surname> <given-names>S.</given-names></name> <name><surname>Bartolomeo</surname> <given-names>P.</given-names></name> <name><surname>Perenin</surname> <given-names>M. T.</given-names></name> <name><surname>Helft</surname> <given-names>G.</given-names></name> <name><surname>Imbert</surname> <given-names>M.</given-names></name></person-group> (<year>1998</year>). <article-title>Scanning direction and line bisection: a study of normal subjects and unilateral neglect patients with opposite reading habits.</article-title> <source><italic>Brain Res.</italic></source> <volume>7</volume> <fpage>173</fpage>&#x2013;<lpage>178</lpage>. <pub-id pub-id-type="doi">10.1016/s0926-6410(98)00022-6</pub-id> <pub-id pub-id-type="pmid">9774725</pub-id></mixed-citation></ref>
<ref id="B14"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cohen</surname> <given-names>N. R.</given-names></name> <name><surname>Cross</surname> <given-names>E. S.</given-names></name> <name><surname>Tunik</surname> <given-names>E.</given-names></name> <name><surname>Grafton</surname> <given-names>S. T.</given-names></name> <name><surname>Culham</surname> <given-names>J. C.</given-names></name></person-group> (<year>2009</year>). <article-title>Ventral and dorsal stream contributions to the online control of immediate and delayed grasping: a TMS approach.</article-title> <source><italic>Neuropsychologia</italic></source> <volume>47</volume> <fpage>1553</fpage>&#x2013;<lpage>1562</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuropsychologia.2008.12.034</pub-id> <pub-id pub-id-type="pmid">19168086</pub-id></mixed-citation></ref>
<ref id="B15"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cox</surname> <given-names>R. T.</given-names></name></person-group> (<year>1946</year>). <article-title>Probability, frequency and reasonable expectation.</article-title> <source><italic>Am. J. Phys.</italic></source> <volume>14</volume> <fpage>1</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1119/1.1990764</pub-id></mixed-citation></ref>
<ref id="B16"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Damasse</surname> <given-names>J. B.</given-names></name> <name><surname>Perrinet</surname> <given-names>L. U.</given-names></name> <name><surname>Madelain</surname> <given-names>L.</given-names></name> <name><surname>Montagnini</surname> <given-names>A.</given-names></name></person-group> (<year>2018</year>). <article-title>Reinforcement effects in anticipatory smooth eye movements.</article-title> <source><italic>J. Vis.</italic></source> <volume>18</volume>:<fpage>14</fpage>. <pub-id pub-id-type="doi">10.1167/18.11.14</pub-id> <pub-id pub-id-type="pmid">30347101</pub-id></mixed-citation></ref>
<ref id="B17"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Desanghere</surname> <given-names>L.</given-names></name> <name><surname>Marotta</surname> <given-names>J. J.</given-names></name></person-group> (<year>2011</year>). <article-title>&#x201C;Graspability&#x201D; of objects affects gaze patterns during perception and action tasks.</article-title> <source><italic>Exp. Brain Res.</italic></source> <volume>212</volume> <fpage>177</fpage>&#x2013;<lpage>187</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-011-2716-x</pub-id> <pub-id pub-id-type="pmid">21597930</pub-id></mixed-citation></ref>
<ref id="B18"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Diaz</surname> <given-names>G.</given-names></name> <name><surname>Cooper</surname> <given-names>J.</given-names></name> <name><surname>Rothkopf</surname> <given-names>C.</given-names></name> <name><surname>Hayhoe</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Saccades to future ball location reveal memory-based prediction in a virtual-reality interception task.</article-title> <source><italic>J. Vis.</italic></source> <volume>13</volume>:<fpage>20</fpage>. <pub-id pub-id-type="doi">10.1167/13.1.20</pub-id> <pub-id pub-id-type="pmid">23325347</pub-id></mixed-citation></ref>
<ref id="B19"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Donato</surname> <given-names>R.</given-names></name> <name><surname>Pavan</surname> <given-names>A.</given-names></name> <name><surname>Campana</surname> <given-names>G.</given-names></name></person-group> (<year>2020</year>). <article-title>Investigating the interaction between form and motion processing: a review of basic research and clinical evidence.</article-title> <source><italic>Front. Psychol.</italic></source> <volume>11</volume>:<fpage>566848</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2020.566848</pub-id> <pub-id pub-id-type="pmid">33192845</pub-id></mixed-citation></ref>
<ref id="B20"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dressing</surname> <given-names>A.</given-names></name> <name><surname>Nitschke</surname> <given-names>K.</given-names></name> <name><surname>K&#x00FC;mmerer</surname> <given-names>D.</given-names></name> <name><surname>Bormann</surname> <given-names>T.</given-names></name> <name><surname>Beume</surname> <given-names>L.</given-names></name> <name><surname>Schmidt</surname> <given-names>C. S. M.</given-names></name><etal/></person-group> (<year>2018</year>). <article-title>Distinct contributions of dorsal and ventral streams to imitation of tool-use and communicative gestures.</article-title> <source><italic>Cereb. Cortex</italic></source> <volume>28</volume> <fpage>474</fpage>&#x2013;<lpage>492</lpage>. <pub-id pub-id-type="doi">10.1093/cercor/bhw383</pub-id> <pub-id pub-id-type="pmid">27909000</pub-id></mixed-citation></ref>
<ref id="B21"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Elliott</surname> <given-names>D.</given-names></name> <name><surname>Helsen</surname> <given-names>W. F.</given-names></name> <name><surname>Chua</surname> <given-names>R.</given-names></name></person-group> (<year>2001</year>). <article-title>A century later: Woodworth&#x2019;s (1899) two-component model of goal-directed aiming.</article-title> <source><italic>Psychol. Bull.</italic></source> <volume>127</volume> <fpage>342</fpage>&#x2013;<lpage>357</lpage>. <pub-id pub-id-type="doi">10.1037/0033-2909.127.3.342</pub-id> <pub-id pub-id-type="pmid">11393300</pub-id></mixed-citation></ref>
<ref id="B22"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fiehler</surname> <given-names>K.</given-names></name> <name><surname>Brenner</surname> <given-names>E.</given-names></name> <name><surname>Spering</surname> <given-names>M.</given-names></name></person-group> (<year>2019</year>). <article-title>Prediction in goal-directed action.</article-title> <source><italic>J. Vis.</italic></source> <volume>19</volume>:<fpage>10</fpage>. <pub-id pub-id-type="doi">10.1167/19.9.10</pub-id> <pub-id pub-id-type="pmid">31434106</pub-id></mixed-citation></ref>
<ref id="B23"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fiser</surname> <given-names>J.</given-names></name> <name><surname>Aslin</surname> <given-names>R. N.</given-names></name></person-group> (<year>2002</year>). <article-title>Statistical learning of new visual feature combinations by infants.</article-title> <source><italic>Proc. Natl. Acad. Sci.</italic></source> <volume>99</volume> <fpage>15822</fpage>&#x2013;<lpage>15826</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.232472899</pub-id> <pub-id pub-id-type="pmid">12429858</pub-id></mixed-citation></ref>
<ref id="B24"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Freud</surname> <given-names>E.</given-names></name> <name><surname>Macdonald</surname> <given-names>S. N.</given-names></name> <name><surname>Chen</surname> <given-names>J.</given-names></name> <name><surname>Quinlan</surname> <given-names>D. J.</given-names></name> <name><surname>Goodale</surname> <given-names>M. A.</given-names></name> <name><surname>Culham</surname> <given-names>J. C.</given-names></name></person-group> (<year>2018</year>). <article-title>Getting a grip on reality: grasping movements directed to real objects and images rely on dissociable neural representations.</article-title> <source><italic>Cortex</italic></source> <volume>98</volume> <fpage>34</fpage>&#x2013;<lpage>48</lpage>. <pub-id pub-id-type="doi">10.1016/j.cortex.2017.02.020</pub-id> <pub-id pub-id-type="pmid">28431740</pub-id></mixed-citation></ref>
<ref id="B25"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Goldman-Rakic</surname> <given-names>P. S.</given-names></name></person-group> (<year>1990</year>). <article-title>Cellular and circuit basis of working memory in prefrontal cortex of nonhuman primates.</article-title> <source><italic>Prog. Brain Res.</italic></source> <volume>85</volume> <fpage>325</fpage>&#x2013;<lpage>336</lpage>. <pub-id pub-id-type="doi">10.1016/s0079-6123(08)62688-6</pub-id> <pub-id pub-id-type="pmid">2094903</pub-id></mixed-citation></ref>
<ref id="B26"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Goldstein-Marcusohn</surname> <given-names>Y.</given-names></name> <name><surname>Asaad</surname> <given-names>R.</given-names></name> <name><surname>Asaad</surname> <given-names>L.</given-names></name> <name><surname>Freud</surname> <given-names>E.</given-names></name></person-group> (<year>2024</year>). <article-title>The large-scale organization of shape processing in the ventral and dorsal pathways is dissociable from attention.</article-title> <source><italic>Cereb. Cortex</italic></source> <volume>34</volume>:<fpage>bhae221</fpage>. <pub-id pub-id-type="doi">10.1093/cercor/bhae221</pub-id> <pub-id pub-id-type="pmid">38832533</pub-id></mixed-citation></ref>
<ref id="B27"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Goodale</surname> <given-names>M. A.</given-names></name></person-group> (<year>2011</year>). <article-title>Transforming vision into action.</article-title> <source><italic>Vis. Res.</italic></source> <volume>51</volume> <fpage>1567</fpage>&#x2013;<lpage>1587</lpage>. <pub-id pub-id-type="doi">10.1016/j.visres.2010.07.027</pub-id> <pub-id pub-id-type="pmid">20691202</pub-id></mixed-citation></ref>
<ref id="B28"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Goodale</surname> <given-names>M. A.</given-names></name></person-group> (<year>2013</year>). <article-title>Separate visual systems for perception and action: a framework for understanding cortical visual impairment.</article-title> <source><italic>Dev. Med. Child Neurol.</italic></source> <volume>55</volume> (<issue>Suppl. 4</issue>), <fpage>9</fpage>&#x2013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1111/dmcn.12299</pub-id> <pub-id pub-id-type="pmid">24237272</pub-id></mixed-citation></ref>
<ref id="B29"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Goodale</surname> <given-names>M. A.</given-names></name> <name><surname>Milner</surname> <given-names>A. D.</given-names></name></person-group> (<year>1992</year>). <article-title>Separate visual pathways for perception and action.</article-title> <source><italic>Trends Neurosci.</italic></source> <volume>15</volume> <fpage>20</fpage>&#x2013;<lpage>25</lpage>. <pub-id pub-id-type="doi">10.1016/0166-2236(92)90344-8</pub-id> <pub-id pub-id-type="pmid">1374953</pub-id></mixed-citation></ref>
<ref id="B30"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Goodale</surname> <given-names>M. A.</given-names></name> <name><surname>Milner</surname> <given-names>A. D.</given-names></name> <name><surname>Jakobson</surname> <given-names>L. S.</given-names></name> <name><surname>Carey</surname> <given-names>D. P.</given-names></name></person-group> (<year>1991</year>). <article-title>A neurological dissociation between perceiving objects and grasping them.</article-title> <source><italic>Nature</italic></source> <volume>349</volume> <fpage>154</fpage>&#x2013;<lpage>156</lpage>. <pub-id pub-id-type="doi">10.1038/349154a0</pub-id> <pub-id pub-id-type="pmid">1986306</pub-id></mixed-citation></ref>
<ref id="B31"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hayhoe</surname> <given-names>M. M.</given-names></name> <name><surname>McKinney</surname> <given-names>T.</given-names></name> <name><surname>Chajka</surname> <given-names>K.</given-names></name> <name><surname>Pelz</surname> <given-names>J. B.</given-names></name></person-group> (<year>2012</year>). <article-title>Predictive eye movements in natural vision.</article-title> <source><italic>Exp. Brain Res.</italic></source> <volume>217</volume> <fpage>125</fpage>&#x2013;<lpage>136</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-011-2979-2</pub-id> <pub-id pub-id-type="pmid">22183755</pub-id></mixed-citation></ref>
<ref id="B32"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hesse</surname> <given-names>C.</given-names></name> <name><surname>Miller</surname> <given-names>L.</given-names></name> <name><surname>Buckingham</surname> <given-names>G.</given-names></name></person-group> (<year>2016</year>). <article-title>Visual information about object size and object position are retained differently in the visual brain: evidence from grasping studies.</article-title> <source><italic>Neuropsychologia</italic></source> <volume>91</volume> <fpage>531</fpage>&#x2013;<lpage>543</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuropsychologia.2016.09.016</pub-id> <pub-id pub-id-type="pmid">27663865</pub-id></mixed-citation></ref>
<ref id="B33"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hung</surname> <given-names>J.</given-names></name> <name><surname>Driver</surname> <given-names>J.</given-names></name> <name><surname>Walsh</surname> <given-names>V.</given-names></name></person-group> (<year>2011</year>). <article-title>Visual selection and the human frontal eye fields: effects of frontal transcranial magnetic stimulation on partial report analyzed by Bundesen&#x2019;s theory of visual attention.</article-title> <source><italic>J. Neurosci.</italic></source> <volume>31</volume> <fpage>15904</fpage>&#x2013;<lpage>15913</lpage>. <pub-id pub-id-type="doi">10.1523/jneurosci.2626-11.2011</pub-id> <pub-id pub-id-type="pmid">22049433</pub-id></mixed-citation></ref>
<ref id="B34"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Izawa</surname> <given-names>J.</given-names></name> <name><surname>Shadmehr</surname> <given-names>R.</given-names></name></person-group> (<year>2008</year>). <article-title>On-line processing of uncertain information in visuomotor control.</article-title> <source><italic>J. Neurosci.</italic></source> <volume>28</volume> <fpage>11360</fpage>&#x2013;<lpage>11368</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.3063-08.2008</pub-id> <pub-id pub-id-type="pmid">18971478</pub-id></mixed-citation></ref>
<ref id="B35"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jakobson</surname> <given-names>L. S.</given-names></name> <name><surname>Archibald</surname> <given-names>Y. M.</given-names></name> <name><surname>Carey</surname> <given-names>D. P.</given-names></name> <name><surname>Goodale</surname> <given-names>M. A.</given-names></name></person-group> (<year>1991</year>). <article-title>A kinematic analysis of reaching and grasping movements in a patient recovering from optic ataxia.</article-title> <source><italic>Neuropsychologia</italic></source> <volume>29</volume> <fpage>803</fpage>&#x2013;<lpage>809</lpage>. <pub-id pub-id-type="doi">10.1016/0028-3932(91)90073-H</pub-id> <pub-id pub-id-type="pmid">1944879</pub-id></mixed-citation></ref>
<ref id="B36"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Janssen</surname> <given-names>P.</given-names></name> <name><surname>Verhoef</surname> <given-names>B. E.</given-names></name> <name><surname>Premereur</surname> <given-names>E.</given-names></name></person-group> (<year>2018</year>). <article-title>Functional interactions between the macaque dorsal and ventral visual pathways during three-dimensional object vision.</article-title> <source><italic>Cortex</italic></source> <volume>98</volume> <fpage>218</fpage>&#x2013;<lpage>227</lpage>. <pub-id pub-id-type="doi">10.1016/j.cortex.2017.01.021</pub-id> <pub-id pub-id-type="pmid">28258716</pub-id></mixed-citation></ref>
<ref id="B37"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jax</surname> <given-names>S. A.</given-names></name> <name><surname>Rosenbaum</surname> <given-names>D. A.</given-names></name></person-group> (<year>2007</year>). <article-title>Hand path priming in manual obstacle avoidance: evidence that the dorsal stream does not only control visually guided actions in real time.</article-title> <source><italic>J. Exp. Psychol.</italic></source> <volume>33</volume> <fpage>425</fpage>&#x2013;<lpage>441</lpage>. <pub-id pub-id-type="doi">10.1037/0096-1523.33.2.425</pub-id> <pub-id pub-id-type="pmid">17469977</pub-id></mixed-citation></ref>
<ref id="B38"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jax</surname> <given-names>S. A.</given-names></name> <name><surname>Rosenbaum</surname> <given-names>D. A.</given-names></name></person-group> (<year>2009</year>). <article-title>Hand path priming in manual obstacle avoidance: rapid decay of dorsal stream information.</article-title> <source><italic>Neuropsychologia</italic></source> <volume>47</volume> <fpage>1573</fpage>&#x2013;<lpage>1577</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuropsychologia.2008.05.019</pub-id> <pub-id pub-id-type="pmid">18597796</pub-id></mixed-citation></ref>
<ref id="B39"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Keizer</surname> <given-names>A. W.</given-names></name> <name><surname>Colzato</surname> <given-names>L. S.</given-names></name> <name><surname>Hommel</surname> <given-names>B.</given-names></name></person-group> (<year>2008</year>). <article-title>Integrating faces, houses, motion, and action: spontaneous binding across ventral and dorsal processing streams.</article-title> <source><italic>Acta Psychol.</italic></source> <volume>127</volume> <fpage>177</fpage>&#x2013;<lpage>185</lpage>. <pub-id pub-id-type="doi">10.1016/j.actpsy.2007.04.003</pub-id> <pub-id pub-id-type="pmid">17555697</pub-id></mixed-citation></ref>
<ref id="B40"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Koller</surname> <given-names>M.</given-names></name></person-group> (<year>2016</year>). <article-title>robustlmm: an R package for robust estimation of linear mixed-effects models.</article-title> <source><italic>J. Stat. Softw.</italic></source> <volume>75</volume> <fpage>1</fpage>&#x2013;<lpage>24</lpage>. <pub-id pub-id-type="doi">10.18637/jss.v075.i06</pub-id></mixed-citation></ref>
<ref id="B41"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>K&#x00F6;rding</surname> <given-names>K. P.</given-names></name> <name><surname>Wolpert</surname> <given-names>D. M.</given-names></name></person-group> (<year>2004</year>). <article-title>Bayesian integration in sensorimotor learning.</article-title> <source><italic>Nature</italic></source> <volume>427</volume> <fpage>244</fpage>&#x2013;<lpage>247</lpage>. <pub-id pub-id-type="doi">10.1038/nature02169</pub-id> <pub-id pub-id-type="pmid">14724638</pub-id></mixed-citation></ref>
<ref id="B42"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>K&#x00F6;rding</surname> <given-names>K. P.</given-names></name> <name><surname>Wolpert</surname> <given-names>D. M.</given-names></name></person-group> (<year>2006</year>). <article-title>Probabilistic mechanisms in sensorimotor control.</article-title> <source><italic>Novartis Found. Symp.</italic></source> <volume>270</volume> <fpage>191</fpage>&#x2013;<lpage>237</lpage>. <pub-id pub-id-type="doi">10.1002/9780470034989.ch15</pub-id></mixed-citation></ref>
<ref id="B43"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kowler</surname> <given-names>E.</given-names></name></person-group> (<year>1989</year>). <article-title>Cognitive expectations, not habits, control anticipatory smooth oculomotor pursuit.</article-title> <source><italic>Vis. Res.</italic></source> <volume>29</volume> <fpage>1049</fpage>&#x2013;<lpage>1057</lpage>. <pub-id pub-id-type="doi">10.1016/0042-6989(89)90052-7</pub-id> <pub-id pub-id-type="pmid">2617852</pub-id></mixed-citation></ref>
<ref id="B44"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Krauzlis</surname> <given-names>R. J.</given-names></name> <name><surname>Miles</surname> <given-names>F. A.</given-names></name></person-group> (<year>1996</year>). <article-title>Decreases in the latency of smooth pursuit and saccadic eye movements produced by the &#x201C;gap paradigm&#x201D; in the monkey.</article-title> <source><italic>Vis. Res.</italic></source> <volume>36</volume> <fpage>1973</fpage>&#x2013;<lpage>1985</lpage>. <pub-id pub-id-type="doi">10.1016/0042-6989(95)00307-x</pub-id> <pub-id pub-id-type="pmid">8759437</pub-id></mixed-citation></ref>
<ref id="B45"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kravitz</surname> <given-names>D. J.</given-names></name> <name><surname>Saleem</surname> <given-names>K. S.</given-names></name> <name><surname>Baker</surname> <given-names>C. I.</given-names></name> <name><surname>Mishkin</surname> <given-names>M.</given-names></name></person-group> (<year>2011</year>). <article-title>A new neural framework for visuospatial processing.</article-title> <source><italic>Nat. Rev. Neurosci.</italic></source> <volume>12</volume> <fpage>217</fpage>&#x2013;<lpage>230</lpage>. <pub-id pub-id-type="doi">10.1038/nrn3008</pub-id> <pub-id pub-id-type="pmid">21415848</pub-id></mixed-citation></ref>
<ref id="B46"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Krekelberg</surname> <given-names>B.</given-names></name> <name><surname>Lappe</surname> <given-names>M.</given-names></name></person-group> (<year>1999</year>). <article-title>Temporal recruitment along the trajectory of moving objects and the perception of position.</article-title> <source><italic>Vis. Res.</italic></source> <volume>39</volume> <fpage>2669</fpage>&#x2013;<lpage>2679</lpage>. <pub-id pub-id-type="doi">10.1016/S0042-6989(98)00287-9</pub-id> <pub-id pub-id-type="pmid">10492829</pub-id></mixed-citation></ref>
<ref id="B47"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Langridge</surname> <given-names>R. W.</given-names></name> <name><surname>Marotta</surname> <given-names>J. J.</given-names></name></person-group> (<year>2017</year>). <article-title>Grasping occluded targets: investigating the influence of target visibility, allocentric cue presence, and direction of motion on gaze and grasp accuracy.</article-title> <source><italic>Exp. Brain Res.</italic></source> <volume>235</volume> <fpage>2705</fpage>&#x2013;<lpage>2716</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-017-5004-6</pub-id> <pub-id pub-id-type="pmid">28597294</pub-id></mixed-citation></ref>
<ref id="B48"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Langridge</surname> <given-names>R. W.</given-names></name> <name><surname>Marotta</surname> <given-names>J. J.</given-names></name></person-group> (<year>2020</year>). <article-title>Grasping a 2D virtual target: the influence of target position and movement on gaze and digit placement.</article-title> <source><italic>Hum. Mov. Sci.</italic></source> <volume>71</volume>:<fpage>102625</fpage>. <pub-id pub-id-type="doi">10.1016/j.humov.2020.102625</pub-id> <pub-id pub-id-type="pmid">32452441</pub-id></mixed-citation></ref>
<ref id="B49"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Langridge</surname> <given-names>R. W.</given-names></name> <name><surname>Marotta</surname> <given-names>J. J.</given-names></name></person-group> (<year>2021</year>). <article-title>Manipulation of physical 3-D and virtual 2-D stimuli: comparing digit placement and fixation position.</article-title> <source><italic>Exp. Brain Res.</italic></source> <volume>239</volume> <fpage>1863</fpage>&#x2013;<lpage>1875</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-021-06101-z</pub-id> <pub-id pub-id-type="pmid">33860822</pub-id></mixed-citation></ref>
<ref id="B50"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>de la Malla</surname> <given-names>C.</given-names></name> <name><surname>L&#x00F3;pez-Moliner</surname> <given-names>J.</given-names></name></person-group> (<year>2015</year>). <article-title>Predictive plus online visual information optimizes temporal precision in interception.</article-title> <source><italic>J. Exp. Psychol. Hum. Percept. Perform.</italic></source> <volume>41</volume> <fpage>1271</fpage>&#x2013;<lpage>1280</lpage>. <pub-id pub-id-type="doi">10.1037/xhp0000075</pub-id> <pub-id pub-id-type="pmid">26076178</pub-id></mixed-citation></ref>
<ref id="B51"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Masters</surname> <given-names>R.</given-names></name> <name><surname>Maxwell</surname> <given-names>J.</given-names></name></person-group> (<year>2008</year>). <article-title>The theory of reinvestment.</article-title> <source><italic>Int. Rev. Sport Exerc. Psychol.</italic></source> <volume>1</volume> <fpage>160</fpage>&#x2013;<lpage>183</lpage>. <pub-id pub-id-type="doi">10.1080/17509840802287218</pub-id></mixed-citation></ref>
<ref id="B52"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mennie</surname> <given-names>N.</given-names></name> <name><surname>Hayhoe</surname> <given-names>M.</given-names></name> <name><surname>Sullivan</surname> <given-names>B.</given-names></name></person-group> (<year>2007</year>). <article-title>Look-ahead fixations: anticipatory eye movements in natural tasks.</article-title> <source><italic>Exp. Brain Res.</italic></source> <volume>179</volume> <fpage>427</fpage>&#x2013;<lpage>442</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-006-0804-0</pub-id> <pub-id pub-id-type="pmid">17171337</pub-id></mixed-citation></ref>
<ref id="B53"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Milner</surname> <given-names>D.</given-names></name> <name><surname>Goodale</surname> <given-names>M.</given-names></name></person-group> (<year>2006</year>). <source><italic>The Visual Brain in Action.</italic></source> <publisher-loc>Oxford</publisher-loc>: <publisher-name>Oxford University Press</publisher-name>.</mixed-citation></ref>
<ref id="B54"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Miyashita</surname> <given-names>Y.</given-names></name></person-group> (<year>1993</year>). <article-title>Inferior temporal cortex: where visual perception meets memory.</article-title> <source><italic>Annu. Rev. Neurosci.</italic></source> <volume>16</volume> <fpage>245</fpage>&#x2013;<lpage>263</lpage>. <pub-id pub-id-type="doi">10.1146/annurev.ne.16.030193.001333</pub-id> <pub-id pub-id-type="pmid">8460893</pub-id></mixed-citation></ref>
<ref id="B55"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Monaco</surname> <given-names>S.</given-names></name> <name><surname>Malfatti</surname> <given-names>G.</given-names></name> <name><surname>Zendron</surname> <given-names>A.</given-names></name> <name><surname>Pellencin</surname> <given-names>E.</given-names></name> <name><surname>Turella</surname> <given-names>L.</given-names></name></person-group> (<year>2019</year>). <article-title>Predictive coding of action intentions in dorsal and ventral visual stream is based on visual anticipations, memory-based information and motor preparation.</article-title> <source><italic>Brain Struct. Funct.</italic></source> <volume>224</volume> <fpage>3291</fpage>&#x2013;<lpage>3308</lpage>. <pub-id pub-id-type="doi">10.1007/s00429-019-01970-1</pub-id> <pub-id pub-id-type="pmid">31673774</pub-id></mixed-citation></ref>
<ref id="B56"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Montagne</surname> <given-names>G.</given-names></name> <name><surname>Laurent</surname> <given-names>M.</given-names></name> <name><surname>Durey</surname> <given-names>A.</given-names></name> <name><surname>Bootsma</surname> <given-names>R.</given-names></name></person-group> (<year>1999</year>). <article-title>Movement reversals in ball catching.</article-title> <source><italic>Exp. Brain Res.</italic></source> <volume>129</volume> <fpage>87</fpage>&#x2013;<lpage>92</lpage>. <pub-id pub-id-type="doi">10.1007/s002210050939</pub-id> <pub-id pub-id-type="pmid">10550506</pub-id></mixed-citation></ref>
<ref id="B57"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Montagnini</surname> <given-names>A.</given-names></name> <name><surname>Souto</surname> <given-names>D.</given-names></name> <name><surname>Masson</surname> <given-names>G.</given-names></name></person-group> (<year>2010</year>). <article-title>Anticipatory eye-movements under uncertainty: a window onto the internal representation of a visuomotor prior.</article-title> <source><italic>J. Vis.</italic></source> <volume>10</volume>:<fpage>554</fpage>. <pub-id pub-id-type="doi">10.1167/10.7.554</pub-id></mixed-citation></ref>
<ref id="B58"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>M&#x00FC;ller</surname> <given-names>S.</given-names></name> <name><surname>Abernethy</surname> <given-names>B.</given-names></name> <name><surname>Farrow</surname> <given-names>D.</given-names></name></person-group> (<year>2006</year>). <article-title>How do world-class cricket batsmen anticipate a bowler&#x2019;s intention?</article-title> <source><italic>Q. J. Exp. Psychol.</italic></source> <volume>59</volume> <fpage>2162</fpage>&#x2013;<lpage>2186</lpage>. <pub-id pub-id-type="doi">10.1080/02643290600576595</pub-id> <pub-id pub-id-type="pmid">17095494</pub-id></mixed-citation></ref>
<ref id="B59"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>M&#x00FC;ller</surname> <given-names>S.</given-names></name> <name><surname>Abernethy</surname> <given-names>B.</given-names></name></person-group> (<year>2006</year>). <article-title>Batting with occluded vision: an in situ examination of the information pick-up and interceptive skills of high- and low-skilled cricket batsmen.</article-title> <source><italic>J. Sci. Med. Sport</italic></source> <volume>9</volume> <fpage>446</fpage>&#x2013;<lpage>458</lpage>. <pub-id pub-id-type="doi">10.1016/j.jsams.2006.03.029</pub-id> <pub-id pub-id-type="pmid">16713351</pub-id></mixed-citation></ref>
<ref id="B60"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Notaro</surname> <given-names>G.</given-names></name> <name><surname>van Zoest</surname> <given-names>W.</given-names></name> <name><surname>Altman</surname> <given-names>M.</given-names></name> <name><surname>Melcher</surname> <given-names>D.</given-names></name> <name><surname>Hasson</surname> <given-names>U.</given-names></name></person-group> (<year>2019</year>). <article-title>Predictions as a window into learning: anticipatory fixation offsets carry more information about environmental statistics than reactive stimulus-responses.</article-title> <source><italic>J. Vis.</italic></source> <volume>19</volume>:<fpage>8</fpage>. <pub-id pub-id-type="doi">10.1167/19.2.8</pub-id> <pub-id pub-id-type="pmid">30779844</pub-id></mixed-citation></ref>
<ref id="B61"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Nowak</surname> <given-names>D. A.</given-names></name> <name><surname>Timmann</surname> <given-names>D.</given-names></name> <name><surname>Hermsd&#x00F6;rfer</surname> <given-names>J.</given-names></name></person-group> (<year>2021</year>). <source><italic>Deficits of Grasping in Cerebellar Disorders.</italic></source> <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>. <pub-id pub-id-type="doi">10.1007/978-3-030-23810-0_73</pub-id></mixed-citation></ref>
<ref id="B62"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Oldfield</surname> <given-names>R. C.</given-names></name></person-group> (<year>1971</year>). <article-title>The assessment and analysis of handedness: the Edinburgh inventory.</article-title> <source><italic>Neuropsychologia</italic></source> <volume>9</volume> <fpage>97</fpage>&#x2013;<lpage>113</lpage>. <pub-id pub-id-type="doi">10.1016/0028-3932(71)90067-4</pub-id> <pub-id pub-id-type="pmid">5146491</pub-id></mixed-citation></ref>
<ref id="B63"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Orban de Xivry</surname> <given-names>J. J.</given-names></name> <name><surname>Lef&#x00E8;vre</surname> <given-names>P.</given-names></name></person-group> (<year>2007</year>). <article-title>Saccades and pursuit: two outcomes of a single sensorimotor process.</article-title> <source><italic>J. Physiol.</italic></source> <volume>584</volume>(<issue>Pt 1</issue>), <fpage>11</fpage>&#x2013;<lpage>23</lpage>. <pub-id pub-id-type="doi">10.1113/jphysiol.2007.139881</pub-id> <pub-id pub-id-type="pmid">17690138</pub-id></mixed-citation></ref>
<ref id="B64"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Paninski</surname> <given-names>L.</given-names></name> <name><surname>Fellows</surname> <given-names>M.</given-names></name> <name><surname>Hatsopoulos</surname> <given-names>N.</given-names></name> <name><surname>Donoghue</surname> <given-names>J.</given-names></name></person-group> (<year>2004</year>). <article-title>Spatiotemporal tuning of motor cortical neurons for hand position and velocity.</article-title> <source><italic>J. Neurophysiol.</italic></source> <volume>91</volume> <fpage>515</fpage>&#x2013;<lpage>532</lpage>. <pub-id pub-id-type="doi">10.1152/JN.00587.2002</pub-id> <pub-id pub-id-type="pmid">13679402</pub-id></mixed-citation></ref>
<ref id="B65"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pasturel</surname> <given-names>C.</given-names></name> <name><surname>Montagnini</surname> <given-names>A.</given-names></name> <name><surname>Perrinet</surname> <given-names>L. U.</given-names></name></person-group> (<year>2020</year>). <article-title>Humans adapt their anticipatory eye movements to the volatility of visual motion properties.</article-title> <source><italic>PLoS Comput. Biol.</italic></source> <volume>16</volume>:<fpage>e1007438</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pcbi.1007438</pub-id> <pub-id pub-id-type="pmid">32282790</pub-id></mixed-citation></ref>
<ref id="B66"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Perry</surname> <given-names>C. J.</given-names></name> <name><surname>Fallah</surname> <given-names>M.</given-names></name></person-group> (<year>2014</year>). <article-title>Feature integration and object representations along the dorsal stream visual hierarchy.</article-title> <source><italic>Front. Comput. Neurosci.</italic></source> <volume>8</volume>:<fpage>84</fpage>. <pub-id pub-id-type="doi">10.3389/fncom.2014.00084</pub-id> <pub-id pub-id-type="pmid">25140147</pub-id></mixed-citation></ref>
<ref id="B67"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Salvucci</surname> <given-names>D. D.</given-names></name> <name><surname>Goldberg</surname> <given-names>J.</given-names></name></person-group> (<year>2000</year>). &#x201C;<article-title>Identifying fixations and saccades in eye-tracking protocols</article-title>,&#x201D; in <source><italic>Proceedings of the 2000 Symposium on Eye Tracking Research and Applications</italic></source>, (<publisher-loc>Palm Beach Gardens, FL</publisher-loc>), <fpage>71</fpage>&#x2013;<lpage>78</lpage>. <pub-id pub-id-type="doi">10.1145/355017.355028</pub-id></mixed-citation></ref>
<ref id="B68"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Santos</surname> <given-names>E. M.</given-names></name> <name><surname>Kowler</surname> <given-names>E.</given-names></name></person-group> (<year>2017</year>). <article-title>Anticipatory smooth pursuit eye movements evoked by probabilistic cues.</article-title> <source><italic>J. Vis.</italic></source> <volume>17</volume>:<fpage>13</fpage>. <pub-id pub-id-type="doi">10.1167/17.13.13</pub-id> <pub-id pub-id-type="pmid">29181503</pub-id></mixed-citation></ref>
<ref id="B69"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Saunders</surname> <given-names>J. A.</given-names></name> <name><surname>Knill</surname> <given-names>D. C.</given-names></name></person-group> (<year>2004</year>). <article-title>Visual feedback control of hand movements.</article-title> <source><italic>J. Neurosci.</italic></source> <volume>24</volume> <fpage>3223</fpage>&#x2013;<lpage>3234</lpage>. <pub-id pub-id-type="doi">10.1523/jneurosci.4319-03.2004</pub-id> <pub-id pub-id-type="pmid">15056701</pub-id></mixed-citation></ref>
<ref id="B70"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schwettmann</surname> <given-names>S.</given-names></name> <name><surname>Tenenbaum</surname> <given-names>J. B.</given-names></name> <name><surname>Kanwisher</surname> <given-names>N.</given-names></name></person-group> (<year>2019</year>). <article-title>Invariant representations of mass in the human brain.</article-title> <source><italic>eLife</italic></source> <volume>8</volume>:<fpage>e46619</fpage>. <pub-id pub-id-type="doi">10.7554/eLife.46619</pub-id> <pub-id pub-id-type="pmid">31845887</pub-id></mixed-citation></ref>
<ref id="B71"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sim&#x00F3;</surname> <given-names>L. S.</given-names></name> <name><surname>Krisky</surname> <given-names>C. M.</given-names></name> <name><surname>Sweeney</surname> <given-names>J. A.</given-names></name></person-group> (<year>2005</year>). <article-title>Functional neuroanatomy of anticipatory behavior: dissociation between sensory-driven and memory-driven systems.</article-title> <source><italic>Cereb. Cortex</italic></source> <volume>15</volume> <fpage>1982</fpage>&#x2013;<lpage>1991</lpage>. <pub-id pub-id-type="doi">10.1093/cercor/bhi073</pub-id> <pub-id pub-id-type="pmid">15758195</pub-id></mixed-citation></ref>
<ref id="B72"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Singhal</surname> <given-names>A.</given-names></name> <name><surname>Monaco</surname> <given-names>S.</given-names></name> <name><surname>Kaufman</surname> <given-names>L. D.</given-names></name> <name><surname>Culham</surname> <given-names>J. C.</given-names></name></person-group> (<year>2013</year>). <article-title>Human fMRI reveals that delayed action re-recruits visual perception.</article-title> <source><italic>PLoS One</italic></source> <volume>8</volume>:<fpage>e73629</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0073629</pub-id> <pub-id pub-id-type="pmid">24040007</pub-id></mixed-citation></ref>
<ref id="B73"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Smith</surname> <given-names>A. K.</given-names></name> <name><surname>Szelest</surname> <given-names>I.</given-names></name> <name><surname>Friedrich</surname> <given-names>T. E.</given-names></name> <name><surname>Elias</surname> <given-names>L. J.</given-names></name></person-group> (<year>2015</year>). <article-title>Native reading direction influences lateral biases in the perception of shape from shading.</article-title> <source><italic>Laterality</italic></source> <volume>20</volume> <fpage>418</fpage>&#x2013;<lpage>433</lpage>. <pub-id pub-id-type="doi">10.1080/1357650X.2014.990975</pub-id> <pub-id pub-id-type="pmid">25537526</pub-id></mixed-citation></ref>
<ref id="B74"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Spering</surname> <given-names>M.</given-names></name> <name><surname>Sch&#x00FC;tz</surname> <given-names>A.</given-names></name> <name><surname>Braun</surname> <given-names>D.</given-names></name> <name><surname>Gegenfurtner</surname> <given-names>K.</given-names></name></person-group> (<year>2011</year>). <article-title>Keep your eyes on the ball: smooth pursuit eye movements enhance prediction of visual motion.</article-title> <source><italic>J. Neurophysiol.</italic></source> <volume>105</volume> <fpage>1756</fpage>&#x2013;<lpage>1767</lpage>. <pub-id pub-id-type="doi">10.1152/jn.00344.2010</pub-id> <pub-id pub-id-type="pmid">21289135</pub-id></mixed-citation></ref>
<ref id="B75"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Thiebaut de Schotten</surname> <given-names>M.</given-names></name> <name><surname>Dell&#x2019;Acqua</surname> <given-names>F.</given-names></name> <name><surname>Forkel</surname> <given-names>S. J.</given-names></name> <name><surname>Simmons</surname> <given-names>A.</given-names></name> <name><surname>Vergani</surname> <given-names>F.</given-names></name></person-group> (<year>2011</year>). <article-title>A lateralized brain network for visuospatial attention.</article-title> <source><italic>Nat. Neurosci.</italic></source> <volume>14</volume> <fpage>1245</fpage>&#x2013;<lpage>1246</lpage>. <pub-id pub-id-type="doi">10.1038/nn.2905</pub-id> <pub-id pub-id-type="pmid">21926985</pub-id></mixed-citation></ref>
<ref id="B76"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Thulasiram</surname> <given-names>M. R.</given-names></name> <name><surname>Langridge</surname> <given-names>R. W.</given-names></name> <name><surname>Abbas</surname> <given-names>H. H.</given-names></name> <name><surname>Marotta</surname> <given-names>J. J.</given-names></name></person-group> (<year>2020</year>). <article-title>Eye&#x2013;hand coordination in reaching and grasping vertically moving targets.</article-title> <source><italic>Exp. Brain Res.</italic></source> <volume>238</volume> <fpage>1433</fpage>&#x2013;<lpage>1440</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-020-05826-7</pub-id> <pub-id pub-id-type="pmid">32382863</pub-id></mixed-citation></ref>
<ref id="B77"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Todd</surname> <given-names>J. T.</given-names></name></person-group> (<year>1981</year>). <article-title>Visual information about moving objects.</article-title> <source><italic>J. Exp. Psychol. Hum. Percept. Perform.</italic></source> <volume>7</volume> <fpage>795</fpage>&#x2013;<lpage>810</lpage>. <pub-id pub-id-type="doi">10.1037/0096-1523.7.4.795</pub-id></mixed-citation></ref>
<ref id="B78"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Turk-Browne</surname> <given-names>N. B.</given-names></name></person-group> (<year>2012</year>). <article-title>Statistical learning and its consequences.</article-title> <source><italic>Nebraska Symp. Motiv.</italic></source> <volume>59</volume> <fpage>117</fpage>&#x2013;<lpage>146</lpage>. <pub-id pub-id-type="doi">10.1007/978-1-4614-4794-8_6</pub-id> <pub-id pub-id-type="pmid">23437632</pub-id></mixed-citation></ref>
<ref id="B79"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Van Dromme</surname> <given-names>I. C.</given-names></name> <name><surname>Premereur</surname> <given-names>E.</given-names></name> <name><surname>Verhoef</surname> <given-names>B. E.</given-names></name> <name><surname>Vanduffel</surname> <given-names>W.</given-names></name> <name><surname>Janssen</surname> <given-names>P.</given-names></name></person-group> (<year>2016</year>). <article-title>Posterior parietal cortex drives inferotemporal activations during three-dimensional object vision.</article-title> <source><italic>PLoS Biol.</italic></source> <volume>14</volume>:<fpage>e1002445</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pbio.1002445</pub-id> <pub-id pub-id-type="pmid">27082854</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by"><p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/311234/overview">Walid Briki</ext-link>, Centre Hospitalier de Grasse, France</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by"><p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/79724/overview">Luisa Sartori</ext-link>, University of Padua, Italy</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/100757/overview">Anna Montagnini</ext-link>, Centre National de la Recherche Scientifique (CNRS), France</p></fn>
</fn-group>
</back>
</article>