<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neuroergonomics</journal-id>
<journal-title>Frontiers in Neuroergonomics</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neuroergonomics</abbrev-journal-title>
<issn pub-type="epub">2673-6195</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnrgo.2025.1535799</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Neuroergonomics</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Evaluating robotic actions: spatiotemporal brain dynamics of performance assessment in robot-assisted laparoscopic training</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Lingelbach</surname> <given-names>Katharina</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1180924/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Rips</surname> <given-names>Jennifer</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2928910/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Karstensen</surname> <given-names>Lennart</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2391016/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Mathis-Ullrich</surname> <given-names>Franziska</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2962535/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Vukeli&#x00107;</surname> <given-names>Mathias</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/171128/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Applied Neurocognitive Systems, Fraunhofer Institute for Industrial Engineering IAO</institution>, <addr-line>Stuttgart</addr-line>, <country>Germany</country></aff>
<aff id="aff2"><sup>2</sup><institution>Applied Neurocognitive Psychology, Department of Psychology, Carl von Ossietzky University</institution>, <addr-line>Oldenburg</addr-line>, <country>Germany</country></aff>
<aff id="aff3"><sup>3</sup><institution>Department Artificial Intelligence in Biomedical Engineering, Friedrich-Alexander-University</institution>, <addr-line>Erlangen</addr-line>, <country>Germany</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Frederic Dehais, Institut Sup&#x000E9;rieur de l&#x00027;A&#x000E9;ronautique et de l&#x00027;Espace (ISAE-SUPAERO), France</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Ahmet Omurtag, Nottingham Trent University, United Kingdom</p>
<p>Daniel Callan, Advanced Telecommunications Research Institute International (ATR), Japan</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Katharina Lingelbach <email>katharina.lingelbach&#x00040;iao.fraunhofer.de</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>19</day>
<month>02</month>
<year>2025</year>
</pub-date>
<pub-date pub-type="collection">
<year>2025</year>
</pub-date>
<volume>6</volume>
<elocation-id>1535799</elocation-id>
<history>
<date date-type="received">
<day>27</day>
<month>11</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>30</day>
<month>01</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2025 Lingelbach, Rips, Karstensen, Mathis-Ullrich and Vukeli&#x00107;.</copyright-statement>
<copyright-year>2025</copyright-year>
<copyright-holder>Lingelbach, Rips, Karstensen, Mathis-Ullrich and Vukeli&#x00107;</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Enhancing medical robot training traditionally relies on explicit feedback from physicians to identify optimal and suboptimal robotic actions during surgery. Passive brain-computer interfaces (BCIs) offer an emerging alternative by enabling implicit brain-based performance evaluations. However, effectively decoding these evaluations of robot performance requires a comprehensive understanding of the spatiotemporal brain dynamics identifying optimal and suboptimal robot actions within realistic settings.</p>
</sec>
<sec>
<title>Methods</title>
<p>We conducted an electroencephalographic study with 16 participants who mentally assessed the quality of robotic actions while observing simulated robot-assisted laparoscopic surgery scenarios designed to approximate real-world conditions. We aimed to identify key spatiotemporal dynamics using the surface Laplacian technique and two complementary data-driven methods: a mass-univariate permutation-based clustering and multivariate pattern analysis (MVPA)-based temporal decoding. A second goal was to identify the optimal time interval of evoked brain signatures for single-trial classification.</p>
</sec>
<sec>
<title>Results</title>
<p>Our analyses revealed three distinct spatiotemporal brain dynamics differentiating the quality assessment of optimal vs. suboptimal robotic actions during video-based laparoscopic training observations. Specifically, an enhanced left fronto-temporal current source, consistent with P300, LPP, and P600 components, indicated heightened attentional allocation and sustained evaluation processes during suboptimal robot actions. Additionally, amplified current sinks in right frontal and mid-occipito-parietal regions suggested prediction-based processing and conflict detection, consistent with the oERN and interaction-based ERN/N400. Both mass-univariate clustering and MVPA provided convergent evidence supporting these neural distinctions.</p>
</sec>
<sec>
<title>Discussion</title>
<p>The identified neural signatures propose that suboptimal robotic actions elicit enhanced, sustained brain dynamics linked to continuous attention allocation, action monitoring, conflict detection, and ongoing evaluative processing. The findings highlight the importance of prioritizing late evaluative brain signatures in BCIs to classify robotic actions reliably. These insights have significant implications for advancing machine-learning-based training paradigms.</p>
</sec></abstract>
<kwd-group>
<kwd>robot training</kwd>
<kwd>performance monitoring</kwd>
<kwd>spatio-temporal clustering</kwd>
<kwd>temporal decoding</kwd>
<kwd>machine learning</kwd>
<kwd>electroencephalography (EEG)</kwd>
<kwd>passive brain-computer interfaces (BCIs)</kwd>
<kwd>current source density (CSD)</kwd>
</kwd-group>
<contract-num rid="cn001">KI-Fortschrittszentrum Lernende Systeme und Kognitive Robotik</contract-num>
<contract-num rid="cn002">MINT-Innovationen P-2020-0116</contract-num>
<contract-sponsor id="cn001">Ministerium f&#x000FC;r Wirtschaft, Arbeit und Wohnungsbau Baden-W&#x000FC;rttemberg<named-content content-type="fundref-id">10.13039/501100011736</named-content></contract-sponsor>
<contract-sponsor id="cn002">Vector Stiftung<named-content content-type="fundref-id">10.13039/501100013912</named-content></contract-sponsor>
<counts>
<fig-count count="6"/>
<table-count count="0"/>
<equation-count count="0"/>
<ref-count count="73"/>
<page-count count="13"/>
<word-count count="9323"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Cognitive Neuroergonomics</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>1 Introduction</title>
<p>Current research is advancing the development and optimization of robotic systems capable of autonomously performing specialized tasks and providing adaptive assistance to support surgeons during various stages of procedures (Moustris et al., <xref ref-type="bibr" rid="B42">2011</xref>; Richter et al., <xref ref-type="bibr" rid="B56">2019</xref>; Thananjeyan et al., <xref ref-type="bibr" rid="B68">2017</xref>). These tasks include camera guidance (Pandya et al., <xref ref-type="bibr" rid="B49">2014</xref>), tissue clamping (Nguyen et al., <xref ref-type="bibr" rid="B43">2019</xref>), tissue manipulation (Scheikl et al., <xref ref-type="bibr" rid="B62">2024</xref>), and surgical knot tying (Osa et al., <xref ref-type="bibr" rid="B46">2014</xref>; Van Den Berg et al., <xref ref-type="bibr" rid="B70">2010</xref>).</p>
<p>Machine learning, particularly reinforcement learning, is well-suited for training robots efficiently, allowing them to learn tasks autonomously (Iturrate et al., <xref ref-type="bibr" rid="B28">2010</xref>; Vukeli&#x00107; et al., <xref ref-type="bibr" rid="B72">2023</xref>). A key challenge, however, lies in providing effective feedback to the reinforcement learning agent. The agent requires frequent and continuous evaluation of its actions via a reward function to distinguish between successful and unsuccessful outcomes. Reinforcement learning is typically trained in simulated environments using this reward function before being adapted to real-world settings for fine-tuning or deployment. The design of the reward function and the real-world fine-tuning both rely on the expertise of physicians. However, obtaining explicit feedback in the form of labels for robot actions from physicians is challenging, as it further burdens their already demanding workload.</p>
<p>Passive brain-computer interfaces (BCIs) offer a promising approach by enabling direct, implicit and continuous feedback loops in human-robot interactions (e.g., Aric&#x000F2; et al., <xref ref-type="bibr" rid="B3">2018</xref>; Protzak et al., <xref ref-type="bibr" rid="B55">2013</xref>), thereby alleviating the burden on physicians (Zander et al., <xref ref-type="bibr" rid="B73">2017</xref>). Brain signals elicited during the observation and mental assessment of robot actions can serve as an evaluation function for reinforcement learning models (Kim et al., <xref ref-type="bibr" rid="B31">2017</xref>; Vukeli&#x00107; et al., <xref ref-type="bibr" rid="B72">2023</xref>).</p>
<p>Previous studies on performance monitoring tasks, including those in BCI applications (Chavarriaga et al., <xref ref-type="bibr" rid="B10">2010</xref>; Iturrate et al., <xref ref-type="bibr" rid="B27">2015</xref>; Ehrlich and Cheng, <xref ref-type="bibr" rid="B13">2019</xref>; Ferrez and Mill&#x000E1;n, <xref ref-type="bibr" rid="B18">2005</xref>; Ferrez and Mill&#x000E1;n, <xref ref-type="bibr" rid="B19">2008</xref>; Kreilinger et al., <xref ref-type="bibr" rid="B33">2012</xref>; Sp&#x000FC;ler and Niethammer, <xref ref-type="bibr" rid="B66">2015</xref>), have shown that observing errors is associated with pronounced event-related potential (ERP) deflections, particularly in the following components (see Somon et al., <xref ref-type="bibr" rid="B64">2017</xref> for review): Across various tasks, an observation-based error-related negativity (oERN; Somon et al., <xref ref-type="bibr" rid="B64">2017</xref>) has been consistently identified, resembling the ERN observed in self-generated errors (Gehring et al., <xref ref-type="bibr" rid="B20">1993</xref>; also referred to as error negativity (Ne) in early studies; Falkenstein et al., <xref ref-type="bibr" rid="B16">1991</xref>). However, the oERN peaks slightly later, between 250 and 270 ms, in frontocentral regions and is enhanced in response to erroneous actions (Chavarriaga et al., <xref ref-type="bibr" rid="B10">2010</xref>; Ferrez and Mill&#x000E1;n, <xref ref-type="bibr" rid="B18">2005</xref>; Ferrez and Mill&#x000E1;n, <xref ref-type="bibr" rid="B19">2008</xref>; Somon et al., <xref ref-type="bibr" rid="B64">2017</xref>; Pavone et al., <xref ref-type="bibr" rid="B50">2016</xref>).</p>
<p>The oERN is sometimes followed by a frontocentral positivity known as error positivity (oPe), which responds to errors depending on contextual factors such as task engagement and error relevance. This component tends to be absent when another observed agent produces the error without relational impact or direct consequence for the observer (Chavarriaga et al., <xref ref-type="bibr" rid="B10">2010</xref>; van Schie et al., <xref ref-type="bibr" rid="B71">2004</xref>; Koban et al., <xref ref-type="bibr" rid="B32">2010</xref>; Padrao et al., <xref ref-type="bibr" rid="B47">2016</xref>). The oPe peaks between 350 and 450 ms and is thought to reflect conscious recognition and high-level evaluation of errors (Ferrez and Mill&#x000E1;n, <xref ref-type="bibr" rid="B18">2005</xref>; Ferrez and Mill&#x000E1;n, <xref ref-type="bibr" rid="B19">2008</xref>; Somon et al., <xref ref-type="bibr" rid="B64">2017</xref>; Pavone et al., <xref ref-type="bibr" rid="B50">2016</xref>).</p>
<p>Many of the studies on error monitoring in observed agents and systems (Ferrez and Mill&#x000E1;n, <xref ref-type="bibr" rid="B18">2005</xref>; Chavarriaga et al., <xref ref-type="bibr" rid="B10">2010</xref>; Padrao et al., <xref ref-type="bibr" rid="B47">2016</xref>; Pavone et al., <xref ref-type="bibr" rid="B50">2016</xref>) have identified a further negative ERP deflection, likely linked to prediction violations and unexpected events. This monitoring-related ERP, termed the interaction ERN by Ferrez and Mill&#x000E1;n (<xref ref-type="bibr" rid="B18">2005</xref>), peaks at frontocentral sites between 400 and 550 ms and is proposed to be related to the N400. Initially linked to semantic inconsistencies, the N400 typically peaks around 450 ms post-stimulus at centroparietal sites (Kutas and Hillyard, <xref ref-type="bibr" rid="B35">1980</xref>). However, it has also been observed in non-semantic contexts, such as unexpected outcomes in movement sequences, with a more frontocentral and temporoparietal distribution (Balconi and Vitaloni, <xref ref-type="bibr" rid="B4">2014</xref>).</p>
<p>Building on this foundation, promising results have emerged in training non-medical robots using these error-related ERPs (Iturrate et al., <xref ref-type="bibr" rid="B28">2010</xref>, <xref ref-type="bibr" rid="B27">2015</xref>; Kim et al., <xref ref-type="bibr" rid="B31">2017</xref>, <xref ref-type="bibr" rid="B30">2020</xref>; Luo et al., <xref ref-type="bibr" rid="B39">2018</xref>; Penaloza et al., <xref ref-type="bibr" rid="B51">2015</xref>; Salazar-Gomez et al., <xref ref-type="bibr" rid="B59">2017</xref>; Vukeli&#x00107; et al., <xref ref-type="bibr" rid="B72">2023</xref>). Despite these advances, the application of BCI-based training for medical robots in realistic scenarios remains scarce.</p>
<p>This study investigated evoked spatiotemporal dynamics associated with evaluating optimal and suboptimal robot actions during a robot-assisted laparoscopic simulation using electroencephalography (EEG). Our objectives were twofold: (a) to determine whether the spatiotemporal dynamics evoked by observing optimal and suboptimal robotic actions in near-naturalistic laparoscopic robot training videos resemble commonly reported error-related potentials, using two complementary analytical approaches; and (b) to identify the optimal time interval of these evoked brain signatures for single-trial classification, with potential application for feedback loops in BCI-driven reinforcement learning systems.</p>
</sec>
<sec sec-type="materials and methods" id="s2">
<title>2 Materials and methods</title>
<sec>
<title>2.1 Participants</title>
<p>Sixteen volunteers (<italic>M</italic><sub><italic>age</italic></sub> &#x0003D; 24.88 years, <italic>SD</italic> &#x0003D; 4.88, range: 19&#x02013;38 years, 14 females, two males) with no prior experience in surgical procedures participated in the study. Eligibility criteria included age between 18 and 40 years, right-handedness, absence of diagnosed neurological, physiological, or psychological disorders, no regular use of centrally acting substances, and no head implants or history of brain surgeries. Participants provided written informed consent before participation and received monetary compensation. The study complied with the Declaration of Helsinki and was approved by the University of T&#x000FC;bingen Ethics Committee (ID: 827/2020BO1).</p>
</sec>
<sec>
<title>2.2 Procedure</title>
<p>At the beginning of the experiment, EEG signals were recorded during a 2-min resting period while participants focused on a fixation cross with their eyes open. Participants subsequently undertook an evaluation task, requiring them to observe laparoscopic video sequences and mentally assess the quality of the robotic action depicted in each sequence.</p>
<p>The video sequences illustrated simulated tissue-cutting procedures performed by a robotic arm using a rod instead of a scalpel. These procedures were conducted on a phantom torso model with replicated organs, offering realistic representations of robotic actions in laparoscopic surgery. Light-emitting diodes (LEDs) were used to mark the target organ and tissue for the surgical cut. The target organs included the right kidney, stomach and spleen, each equipped with a single LED point sensor, and the left kidney, which was fitted with a line sensor consisting of a row of seven LEDs (<xref ref-type="fig" rid="F1">Figures 1A</xref>&#x02013;<xref ref-type="fig" rid="F1">D</xref>). The optimal action required the robot to press the rod with sufficient pressure onto the target organ for the point sensor and to move the rod along the organ&#x00027;s surface for the line sensor. If the robot applied adequate pressure to the marked tissue, the LEDs turned off (<xref ref-type="fig" rid="F1">Figure 1A</xref>). Conversely, if the tissue was missed or the pressure was insufficient, the LEDs remained fully or partially lit. Detailed information about the stimulus material and an illustrative overview video are provided in the <xref ref-type="supplementary-material" rid="SM1">Supplementary material</xref>. The stimulus database is accessible upon request through the OSF repository at <ext-link ext-link-type="uri" xlink:href="https://osf.io/6ndsv/">https://osf.io/6ndsv/</ext-link>.</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>Excerpt from zoomed-in video sequences and laboratory setup. <bold>(A)</bold> Video sequence of the right kidney with a point sensor. The LED light turned off during the video, indicating the robot&#x00027;s action was successful. <bold>(B)</bold> Video sequence of the stomach with a point sensor. The sensor was not touched during the video, and the robot&#x00027;s action failed. <bold>(C)</bold> Video sequence of the spleen with a point sensor. The sensor was touched during the video, but the pressure was insufficient, resulting in a failed action. <bold>(D)</bold> Video sequence of the left kidney with a line sensor. The line sensor was not touched during the video, leading to a failed action. <bold>(E)</bold> Laboratory setup with a participant seated in front of the monitor and eye-tracking system, wearing a 64-channel EEG.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnrgo-06-1535799-g0001.tif"/>
</fig>
<p>Participants rated each robotic action as good or bad, giving verbal responses during the practice phase to confirm task comprehension, and conducting mental evaluations during the actual experiment. High classification accuracy of robot action assessments was confirmed in a preliminary behavioral study (<italic>N</italic> &#x0003D; 9; see <xref ref-type="supplementary-material" rid="SM1">Supplementary material</xref> for details).</p>
<p>Following a brief practice session consisting of 15 video sequences to familiarize them with the task and video material at the beginning of the experiment, participants were presented with 1,000 video sequences across 10 blocks. Each block included an overview video, a countdown, and a randomized combination of 65 sequences showing optimal robotic actions and 35 showing suboptimal actions (100 sequences per block; <xref ref-type="fig" rid="F1">Figures 1</xref>, <xref ref-type="fig" rid="F2">2</xref>). A 1-min break followed each block. The overview video at the beginning of each block depicted a sequence of optimal and suboptimal robotic actions from two viewpoints (zoomed-out in the top left corner of the screen and zoomed-in in the bottom right corner of the screen), providing context for the medical scenario and upcoming 100 zoomed-in video sequences. Each zoomed-in video sequence contained a single robotic action. It lasted 1.5 s and was followed by a jittered interstimulus interval ranging from 0.75 to 1 s, during which a fixation cross appeared at the center of the screen (<xref ref-type="fig" rid="F2">Figure 2</xref>).</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>Overview of an experimental block in the robot action evaluation task. The overview phase at the beginning of each block includes two camera views. The following countdown is displayed in German (English: &#x0201C;It&#x00027;s about to start 3 - 2 - 1&#x0201D;). ITI, interstimulus interval.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnrgo-06-1535799-g0002.tif"/>
</fig>
</sec>
<sec>
<title>2.3 Data acquisition and preprocessing</title>
<p>EEG potentials were recorded according to the international 10&#x02013;20 system with 64 electrodes and at a sampling rate of 1,000 Hz (actiCAP and BrainAmp, BrainProducts GmbH, Germany). The locations of the electrodes were Fp1, Fp2, Fz, AF3, AF4, AF7, AF8, F1, F2, F3, F4, F5, F6, F7, F8, FC1, FC2, FC3, FC4, FC5, FC6, FT7, FT8, FT9, FT10, Cz, C1, C2, C3, C4, C5, C6, T7, T8, CPz, CP1, CP2, CP3, CP4, CP5, CP6, TP7, TP8, TP9, TP10, Pz, P1, P2, P3, P4, P5, P6, P7, P8, POz, PO3, PO4, PO7, PO8, Oz, O1, O2, and Iz. The ground electrode was positioned on FPz and the reference electrode on FCz. Impedance of electrodes was kept below 25 <italic>k&#x003A9;</italic> at the beginning of the experiment.</p>
<p>All analyses were performed in Python and MNE Python (Gramfort et al., <xref ref-type="bibr" rid="B21">2014</xref>). The EEG signals were de-trended and bandpass filtered using a fourth-order infinite impulse response (IIR) Butterworth filter with cut-off frequencies of 0.2 and 10 Hz (see also Iturrate et al., <xref ref-type="bibr" rid="B28">2010</xref>, <xref ref-type="bibr" rid="B27">2015</xref>; Kim et al., <xref ref-type="bibr" rid="B31">2017</xref>, <xref ref-type="bibr" rid="B30">2020</xref>; Vukeli&#x00107; et al., <xref ref-type="bibr" rid="B72">2023</xref>). The signals were then segmented into 2.2-second epochs, each beginning 200 ms before the onset of each zoomed-in video sequence. Epoched data was subsequently down-sampled to 250 Hz. To remove cardiac, muscle, and ocular artifacts, epochs were cleaned using an independent component analysis (ICA; Chaumon et al., <xref ref-type="bibr" rid="B9">2015</xref>; Hipp and Siegel, <xref ref-type="bibr" rid="B25">2013</xref>; Lee et al., <xref ref-type="bibr" rid="B37">1999</xref>) within an automated pipeline called FASTER (Nolan et al., <xref ref-type="bibr" rid="B44">2010</xref>) as implemented in mne-python version 1.6.1 (Gramfort et al., <xref ref-type="bibr" rid="B21">2014</xref>). To generate an electro-oculography (EOG) surrogate for the ICA, a virtual EOG channel was constructed using the frontal Fp1 and Fp2 electrode signals. After cleaning the signals, the epochs were baseline corrected by subtracting the mean amplitude of the time interval before the video onset (200 ms) and bad channels were interpolated per epoch using a spline interpolation (Gramfort et al., <xref ref-type="bibr" rid="B21">2014</xref>; Nolan et al., <xref ref-type="bibr" rid="B44">2010</xref>). 
Finally, the reference-free current source density (CSD) transformation was applied to the data to enhance spatial resolution by minimizing volume conduction effects and estimating local electrical activity (current sources and sinks) at the scalp surface (Perrin et al., <xref ref-type="bibr" rid="B53">1989</xref>; Kayser and Tenke, <xref ref-type="bibr" rid="B29">2015</xref>).</p>
<p>CSD is a mathematical transformation of EEG signals that estimates local current sources and sinks across the cortical surface at the sensor level. By computing the second spatial derivative of the electric potential field, it determines the spatial distribution and direction of current flow. Notably, the number of output channels matches the input channels, as the transformation is applied directly to the data from each electrode without changing the input dimensionality. CSD distinguishes between current sources (positive polarity) and sinks (negative polarity). In a CSD map, a source indicates outward current flow from a cortical region, reflecting reduced excitatory postsynaptic potentials (EPSPs). In contrast, a sink represents inward current flow linked to increased EPSPs (Perrin et al., <xref ref-type="bibr" rid="B53">1989</xref>; Kayser and Tenke, <xref ref-type="bibr" rid="B29">2015</xref>). This approach offers a more localized and directly interpretable representation of neural activity than standard reference-dependent EEG potentials (Perrin et al., <xref ref-type="bibr" rid="B53">1989</xref>; Kayser and Tenke, <xref ref-type="bibr" rid="B29">2015</xref>).</p>
<p>For subsequent analyses, the number of epochs was equalized across conditions by minimizing timing discrepancies across trial lists, ensuring an identical epoch count per condition.</p>
</sec>
<sec>
<title>2.4 Mass-univariate permutation-based clustering</title>
<p>To examine differences in brain signatures evoked by the robot actions, we used mass-univariate permutation-based spatiotemporal clustering (Maris and Oostenveld, <xref ref-type="bibr" rid="B40">2007</xref>) with a paired <italic>t</italic>-test. The clustering was performed on contrast data, calculated by subject-wise subtracting suboptimal from optimal evoked responses.</p>
<p>Compared to traditional univariate approaches, such as performing an ANOVA or <italic>t</italic>-test on the mean or peak amplitude within a predefined time interval, mass-univariate statistics allow statistical testing at every location and time point (e.g., Maris and Oostenveld, <xref ref-type="bibr" rid="B40">2007</xref>; Pernet et al., <xref ref-type="bibr" rid="B52">2015</xref>; Groppe et al., <xref ref-type="bibr" rid="B22">2011</xref>).</p>
<p>This approach is particularly advantageous when addressing variability in ERP latencies arising from experimental parameters, such as complex stimulus material (e.g., in the case of the P300; Bentin et al., <xref ref-type="bibr" rid="B7">1999</xref>). However, the multiple comparisons problem&#x02014;occurring when testing across many locations and time points&#x02014;must be accounted for. Mass-univariate permutation-based spatiotemporal clustering addresses this issue by identifying clusters of contiguous samples (i.e., time points and sensors) that exhibit similar effects, thereby reducing the number of comparisons to the cluster level (Maris and Oostenveld, <xref ref-type="bibr" rid="B40">2007</xref>). Neighboring effects (test statistics of time points and sensors) that exceed a predefined univariate cluster-forming threshold (here &#x003B1; &#x0003C; 0.05) are grouped into clusters. Statistical values (e.g., <italic>t</italic>- or <italic>F</italic>-values) within these clusters are aggregated, for instance by summing them, into cluster-mass scores (Maris and Oostenveld, <xref ref-type="bibr" rid="B40">2007</xref>). Statistical significance is then determined by comparing the observed cluster-mass scores to a reference null distribution, generated via random resampling of condition labels (e.g., using Monte Carlo permutations or bootstrapping). A <italic>p</italic>-value is calculated for each cluster as the proportion of permutations in which the cluster-level statistic from the null distribution equals or exceeds the observed cluster-mass score obtained from the original dataset. To control the overall Type I error rate (false positives) across all clusters, only clusters with a <italic>p</italic>-value below a predefined group-level threshold (here &#x003B1; &#x0003C; 0.05) are considered statistically significant.</p>
</sec>
<sec>
<title>2.5 Temporal decoding with a linear machine learning model</title>
<p>Temporal decoding with subject-wise multivariate pattern analysis (MVPA) provides an alternative to mass-univariate analyses, offering enhanced sensitivity and statistical power (Holdgraf et al., <xref ref-type="bibr" rid="B26">2017</xref>; Kriegeskorte and Douglas, <xref ref-type="bibr" rid="B34">2019</xref>). MVPA leverages the multidimensional characteristics of neurophysiological data from each subject, thereby accounting for anatomical and functional inter-individual neural variability (Marsicano et al., <xref ref-type="bibr" rid="B41">2024</xref>).</p>
<p><xref ref-type="fig" rid="F3">Figure 3</xref> illustrates the input data structure and pipeline steps applied in temporal decoding. For the machine-learning (ML) based analyses, epoched data were downsampled to 100 Hz to reduce computational costs. Linear discriminant analysis (LDA), using a least-squares solution and automatic shrinkage via the Ledoit-Wolf lemma (as implemented in scikit-learn version 1.4.1), was applied as a sliding supervised ML algorithm (i.e., the Base Estimator) on a time-point-by-time-point basis (implemented in mne-python version 1.6.1; Gramfort et al., <xref ref-type="bibr" rid="B21">2014</xref>). The data of each participant (shape: n epochs, n channels, n timepoints; <xref ref-type="fig" rid="F3">Figure 3</xref>) was split into training and testing sets using a repeated stratified five-fold cross-validation with 20 iterations, resulting in 100 folds per time point. In total, 220 (timepoints) &#x000D7; 100 (cross-validation folds) &#x000D7; 16 (participants) models were trained and fitted in the time decoding. The Area Under the Receiver Operating Characteristic Curve (ROC-AUC, henceforth referred to as AUC) was used as performance metric. Classification performance was statistically evaluated by bootstrapping the AUC scores across participants and folds in a Monte Carlo simulation (MCS; 5,000 iterations), yielding the bootstrapped mean and its 95% confidence interval (CI; Cumming, <xref ref-type="bibr" rid="B12">2014</xref>). Time intervals were considered significant if the lower CI boundary of the average LDA performance exceeded the upper CI boundary of an average dummy performance (i.e., an empirical baseline estimated by chance-level stratified classification in scikit-learn version 1.4.1).</p>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>Overview of the preprocessing steps, data structure and machine learning pipeline for the <bold>(left column)</bold> temporal decoding and <bold>(right column)</bold> single-trial decoding. <italic>N</italic>, sample size; LDA, linear discriminant analysis; Iter, iterations.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnrgo-06-1535799-g0003.tif"/>
</fig>
<p>After fitting the linear models, model decoding weights were transformed into activation patterns representing their contribution to classification through inverse computations (Haufe et al., <xref ref-type="bibr" rid="B24">2014</xref>). These activation patterns were averaged across participants and visualized using topographic maps. A spatiotemporal mask was applied to identify statistically significant activation patterns using univariate bootstrapped means and CIs (MCS with 5,000 iterations). Only patterns at electrode positions where the CI for the average evoked response contrast (suboptimal&#x02013;optimal robot actions) excluded zero were considered significant and visualized. Positive values in the activation patterns indicate that the region contributes to the classification of evaluated suboptimal robot actions, whereas negative values indicate a contribution to the classification of evaluated optimal robot actions. Pattern values closer to zero indicate lower confidence in their contribution. To assess the relationship between evoked response amplitudes and significant patterns, the time course of contributing regions was visualized, along with bootstrapped means and CIs for each condition at the time point of maximal classification performance.</p>
</sec>
<sec>
<title>2.6 Single-trial decoding</title>
<p>In the final analysis, we decoded the observer&#x00027;s evaluation of robot actions from brain signatures on a trial-by-trial basis using three distinct time intervals for feature extraction identified through MVPA time decoding (see <xref ref-type="fig" rid="F3">Figure 3</xref> for an illustrative overview). These intervals were defined as (1) 0&#x02013;750 ms, (2) 750&#x02013;1,350 ms, and (3) 1,350&#x02013;2,000 ms after video onset. As in the time decoding, data were downsampled to 100 Hz to reduce computational costs.</p>
<p>An LDA classifier with automatically extracted features based on Riemannian geometry has been proven effective for state decoding in passive BCIs (Lotte et al., <xref ref-type="bibr" rid="B38">2018</xref>; Vukeli&#x00107; et al., <xref ref-type="bibr" rid="B72">2023</xref>) and was, thus, applied to each time interval in a within-subject single-trial decoding (implemented in pyRiemann; version 0.5). The Riemannian-based method operates directly on the epoched EEG time series (data shape: n epochs, n channels, n timepoints; <xref ref-type="fig" rid="F3">Figure 3</xref>), obviating the need for manual feature extraction. It converts the EEG time series into symmetric positive definite (SPD) covariance matrices and applies Riemannian geometry to analyse these matrices (Congedo et al., <xref ref-type="bibr" rid="B11">2017</xref>; Appriou et al., <xref ref-type="bibr" rid="B2">2020</xref>; Vukeli&#x00107; et al., <xref ref-type="bibr" rid="B72">2023</xref>). In the Riemannian manifold, covariance matrices were spatially filtered with the xDAWN algorithm (Rivet et al., <xref ref-type="bibr" rid="B58">2009</xref>) before being projected into tangent space for transformation into Euclidean vectors (Barachant et al., <xref ref-type="bibr" rid="B5">2011</xref>). This tangent space projection preserves the manifold structure while enabling effective classification (Appriou et al., <xref ref-type="bibr" rid="B2">2020</xref>).</p>
<p>Classification was performed using an LDA classifier (with default settings as implemented in scikit-learn version 1.4.1). Performance was quantified using a repeated stratified k-fold cross-validation (five splits, 20 iterations) with AUC as metric. As with temporal decoding, a dummy classifier estimated chance-level performance. Non-parametric bootstrapping of classification scores across folds and subjects yielded the average performance and corresponding CI for each classifier, enabling statistical evaluation (Cumming, <xref ref-type="bibr" rid="B12">2014</xref>).</p>
</sec>
</sec>
<sec sec-type="results" id="s3">
<title>3 Results</title>
<sec>
<title>3.1 Mass-univariate permutation-based clustering</title>
<p>The non-parametric permutation-based clustering identified significant spatiotemporal differences in evoked responses when observing suboptimal compared to optimal robot actions across five clusters.</p>
<p>The first two clusters emerged &#x0007E;440 ms after video onset, revealing lateralised frontal responses. Observing suboptimal robot actions resulted in a reduced left-hemispheric frontal current sink (<xref ref-type="fig" rid="F4">Figure 4A</xref>; 13 electrodes; <italic>p</italic> &#x0003C; 0.001) and an enhanced right-hemispheric frontal current sink (<xref ref-type="fig" rid="F4">Figure 4B</xref>; six electrodes; <italic>p</italic> &#x0003C; 0.001). In electrodes overlying right-hemispheric frontal regions, observing optimal robot actions was even associated with current sources (i.e., a positive deflection) from around 500 ms until the analysis window&#x00027;s end (<xref ref-type="fig" rid="F4">Figure 4B</xref>). The third cluster, including 25 electrodes over occipital, parietal, and left temporal regions, emerged at 448 ms. It differentiated robot actions by showing a reduced current source peak around 550 ms, followed by an increased current sink from 800 to 1,760 ms for suboptimal compared to optimal actions (<xref ref-type="fig" rid="F4">Figure 4C</xref>; <italic>p</italic> &#x0003C; 0.001). The fourth cluster, with five electrodes over right parieto-temporal regions, appeared at 460 ms, showing increased current sources for suboptimal actions (<xref ref-type="fig" rid="F4">Figure 4D</xref>; <italic>p</italic> &#x0003C; 0.017). Finally, the fifth cluster over fronto-central regions, emerging after 576 ms, showed a decreased current sink for suboptimal actions (<xref ref-type="fig" rid="F4">Figure 4E</xref>; eight electrodes; <italic>p</italic> &#x0003C; 0.013). All clusters persisted almost until the end of the 2-s analysis interval (1,760&#x02013;1,996 ms).</p>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p>Spatio-temporal clusters <bold>(A&#x02013;E)</bold> with topographical maps of averaged <italic>t</italic>-values, along with evoked responses for each condition and their contrast. Significant electrode positions for each cluster are indicated by filled white circles. Grand averages (<italic>n</italic> = 16) of the evoked responses during observation of optimal (green) and suboptimal (red) robot actions are shown over time, including their contrasts (suboptimal&#x02013;optimal; brown dashed line). The time ranges of significant clusters are highlighted in orange.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnrgo-06-1535799-g0004.tif"/>
</fig>
</sec>
<sec>
<title>3.2 Temporal decoding with a linear machine learning model</title>
<p>Temporal decoding using MVPA and LDA successfully distinguished the brain signatures evoked by observing optimal vs. suboptimal robot actions.</p>
<p>The empirical chance level of the dummy classifier was estimated at an AUC score of 48.4 (95% CI [48.06, 48.79]). In later intervals, beginning 750 ms post-stimulus onset and continuing until the end of the 2-s analysis period, classification performance consistently exceeded a 60% AUC score. The classification performance varied over the analysis interval, with a standard deviation of 4.13 (4.11, 4.13). The highest classification performance was observed after 1,658 ms with an AUC score of 63.99 (95% CI [62.56, 65.38]), representing a difference of 15.21 (95% CI [13.78, 16.6]) to the upper CI boundary of the mean chance performance (see <xref ref-type="fig" rid="F5">Figure 5A</xref>).</p>
<fig id="F5" position="float">
<label>Figure 5</label>
<caption><p>Classification performance in the MVPA temporal decoding with LDA. <bold>(A)</bold> Average LDA temporal classification performance, including the corresponding CI band across folds and subjects, is presented relative to the estimated chance level (upper CI boundary of the average dummy classifier performance). Below, the average activation patterns derived from model weight coefficients are depicted. Patterns were spatio-temporally masked using bootstrapped CIs and averaged over time intervals of 200 ms starting 200 ms before to 2,000 ms after the onset of the video. The star icon indicates the peak (max at 1,658 ms) of above-chance level classification performance. <bold>(B)</bold> Activation pattern of the time point of peak decoding performance, along with the evoked responses per condition in the regions of meaningful contribution at the maximum decoding time point and as time series along the analysis interval (dashed gray line: contrast suboptimal&#x02013;optimal). Positive pattern values are associated with classifying observed optimal robot actions, while negative values in activation patterns are associated with observed suboptimal robot actions.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnrgo-06-1535799-g0005.tif"/>
</fig>
<p>At the peak decoding time, significant activation patterns highlighted three regions of interest that differentiated between optimal and suboptimal robot actions. These regions included electrodes over the right frontal, left fronto-temporal, and mid-parietal areas, corresponding to three clusters identified in the mass-univariate permutation-based analysis.</p>
<p><xref ref-type="fig" rid="F5">Figure 5B</xref> shows the relationship between classification-contributing regions and the brain signatures evoked in these regions by the conditions. The pattern that classified suboptimal robot actions comprised electrodes positioned over a left fronto-temporal region (F7 and FT9) and revealed a current source for suboptimal actions, while optimal robot actions elicited a current sink. Two other regions contributed to classifying optimal robot actions: Current sinks in a right frontal electrode (Fp2) and electrodes overlying the mid-parietal region (P1, Pz, P2) were reduced for evaluating optimal compared to suboptimal actions.</p>
</sec>
<sec>
<title>3.3 Single-trial decoding</title>
<p>In the single-trial decoding of robot performance evaluations, the Riemannian LDA combined with xDawn spatial filtering yielded classification results above chance level for all selected time intervals (dummy performance: train AUC = 48.21, 95% CI [48.15, 48.28]; test AUC = 51.37, 95% CI [51.36, 51.38]). The highest classification performance was observed using the latest interval including evoked responses from 1,350 to 2,000 ms after video onset, with a test AUC of 67.19 (95% CI [66.85, 67.53]). This interval also included the time point of peak decoding performance in the MVPA-based temporal decoding. In contrast, earlier intervals cropped before 750 ms post-stimulus showed a significant decrease in performance, with test AUCs of 59.98 (95% CI [59.77, 60.20]) for an interval from 0 to 700 ms and 58.37 (95% CI [58.11, 58.62]) for an interval from 700 to 1,350 ms (<xref ref-type="fig" rid="F6">Figure 6</xref>).</p>
<fig id="F6" position="float">
<label>Figure 6</label>
<caption><p>Riemannian LDA single-trial decoding performance by time interval. Bar plots display the average AUC classification scores of the Riemannian LDA relative to the upper CI boundary of the estimated chance level (dummy performance: train AUC = 48.02, 95% CI [47.99, 51.06]; test AUC = 51.04, 95% CI [51.02, 51.06]). The mean AUC score (M) and its 95% CI ([]), estimated via bootstrapping and represented by error bars, are displayed above each bar (for training and test datasets across time intervals). Individual subject decoding performances are depicted as scattered dots. Time intervals of the decoding were (1) 0 to 700 ms, (2) 700 to 1,350 ms, and (3) 1,350 to 2,000 ms after video onset.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnrgo-06-1535799-g0006.tif"/>
</fig>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<title>4 Discussion</title>
<p>Our study identified distinct spatiotemporal brain dynamics that reliably differentiate the mental performance evaluation of optimal and suboptimal robotic actions observed in video excerpts of laparoscopic training procedures.</p>
<p>We assessed the robustness of neural signatures by employing surface Laplacian transformations to enhance the spatial resolution of evoked responses (see Somon et al., <xref ref-type="bibr" rid="B65">2019</xref>) and two complementary data-driven methods &#x02013; a mass-univariate permutation-based clustering and multivariate pattern analysis (MVPA) temporal decoding. The identified discriminative spatiotemporal brain signatures suggest that differentiation between optimal and suboptimal actions does not occur during early perceptual stages but rather at later evaluative stages (Somon et al., <xref ref-type="bibr" rid="B64">2017</xref>; Ferrez and Mill&#x000E1;n, <xref ref-type="bibr" rid="B18">2005</xref>; Chavarriaga et al., <xref ref-type="bibr" rid="B10">2010</xref>; Oliveira et al., <xref ref-type="bibr" rid="B45">2007</xref>). This finding was observed despite a perceptual component introduced by LED feedback in the evaluation task.</p>
<p>In addition to analyzing stimulus-locked evoked responses, we examined single-trial decoding performance of robot action evaluations across different time intervals of averaged evoked brain responses. The Riemannian LDA with xDawn filtering reliably classified observers&#x00027; electrophysiological responses to optimal and suboptimal robot actions on a trial-by-trial level. Temporal dynamics of classification performance revealed that late intervals (from 1,350 to 2,000 ms post-stimulus) significantly outperformed earlier intervals aligning with findings from MVPA temporal decoding. This indicates that the most informative brain patterns are linked to attentional and evaluative processes related to prediction violations and unexpected events (Somon et al., <xref ref-type="bibr" rid="B64">2017</xref>; Chavarriaga et al., <xref ref-type="bibr" rid="B10">2010</xref>; Ferrez and Mill&#x000E1;n, <xref ref-type="bibr" rid="B18">2005</xref>; Oliveira et al., <xref ref-type="bibr" rid="B45">2007</xref>).</p>
<sec>
<title>4.1 Convergent findings for evoked responses differentiating robot performance</title>
<p>Our clustering analyses revealed five spatiotemporal brain signatures associated with robot performance evaluation, of which three were replicated in the temporal decoding. The other two clusters including electrodes localized over right mid-fronto-central and temporal areas were exclusively identified in the mass-univariate analysis. Thus, they exhibited limited reliability as distinctive patterns for evaluating robot performance in near-naturalistic scenarios. Therefore, the next section focuses on the remaining three signatures located over the left fronto-temporal, right frontal, and mid-occipito-parietal regions.</p>
<sec>
<title>4.1.1 Left fronto-temporal spatiotemporal signature</title>
<p>Evoked responses in electrodes overlying left fronto-temporal regions differentiated the evaluation of optimal and suboptimal robot performance in both the clustering (<xref ref-type="fig" rid="F4">Figure 4A</xref>) and temporal decoding (<xref ref-type="fig" rid="F5">Figure 5B</xref>, upper row). This brain dynamic is characterized by differences in current direction-switching around 420 ms post-stimulus onset between suboptimal and optimal robot actions. Observing suboptimal performance evoked a persistent current source (see <xref ref-type="fig" rid="F5">Figure 5B</xref>, upper row), while optimal performance elicited a sustained current sink during late time intervals (see <xref ref-type="fig" rid="F4">Figures 4A</xref>, <xref ref-type="fig" rid="F5">5B</xref>, upper row).</p>
<p>The late shift to a current source during suboptimal actions may indicate the allocation of additional cognitive resources for conflict processing and deviation detection (Botvinick et al., <xref ref-type="bibr" rid="B8">2001</xref>; Ullsperger et al., <xref ref-type="bibr" rid="B69">2014</xref>; Bartholow et al., <xref ref-type="bibr" rid="B6">2005</xref>; Pailing and Segalowitz, <xref ref-type="bibr" rid="B48">2004</xref>). Although typical oERN or oPE responses were not observed in this study, the sustained fronto-temporal responses align with components such as the P300, late positive potential (LPP) and P600 (Somon et al., <xref ref-type="bibr" rid="B64">2017</xref>; Sassenhagen et al., <xref ref-type="bibr" rid="B60">2014</xref>; Oliveira et al., <xref ref-type="bibr" rid="B45">2007</xref>). The P300 and LPP are positive deflections that typically emerge around 300 ms after significant and emotionally salient stimuli, respectively, at centroparietal electrode sites (Polich, <xref ref-type="bibr" rid="B54">2007</xref>; see Hajcak and Foti, <xref ref-type="bibr" rid="B23">2020</xref> for review). The P300 appears as a broad peak, while the LPP can be sustained for up to 1,000 ms or more. Their amplitudes increase in response to motivationally significant but also deviant and uncertain stimuli (Scheffers and Coles, <xref ref-type="bibr" rid="B61">2000</xref>; Sutton et al., <xref ref-type="bibr" rid="B67">1965</xref>), indicating sustained attentional allocation toward these stimuli (Ridderinkhof et al., <xref ref-type="bibr" rid="B57">2009</xref>; Hajcak and Foti, <xref ref-type="bibr" rid="B23">2020</xref>; Falkenstein et al., <xref ref-type="bibr" rid="B17">2000</xref>). The P600, initially linked to processing linguistic anomalies (Sassenhagen et al., <xref ref-type="bibr" rid="B60">2014</xref>), has also been observed during error processing in choice-reaction time tasks with enhanced amplitudes following errors (Falkenstein et al., <xref ref-type="bibr" rid="B16">1991</xref>).</p>
<p>In summary, the sustained current source observed during suboptimal robot actions likely reflects increased cognitive and attentional engagement in a persistent evaluative stance. This state likely facilitates conflict detection by assessing action accuracy and adequacy, monitors deviations, and supports cognitive flexibility.</p>
</sec>
<sec>
<title>4.1.2 Right frontal and mid-occipito-parietal spatiotemporal signatures</title>
<p>In addition to the left fronto-temporal signature, we observed two spatiotemporal signatures characterized by enhanced current sinks for evaluated suboptimal robot performance in both the cluster analysis (<xref ref-type="fig" rid="F4">Figures 4B</xref>, <xref ref-type="fig" rid="F4">C</xref>) and temporal decoding activation patterns (<xref ref-type="fig" rid="F5">Figure 5B</xref>, middle and lower row).</p>
<p>After &#x0007E;300 ms, a right frontal current sink emerged, peaking between 400 and 600 ms, with a delayed but pronounced deflection in response to suboptimal actions (<xref ref-type="fig" rid="F4">Figures 4B</xref>, <xref ref-type="fig" rid="F5">5B</xref>, middle). Another spatiotemporal brain signature, indicative of suboptimal actions and located over mid-occipito-parietal areas, appeared between 350 and &#x0007E;500 ms (<xref ref-type="fig" rid="F4">Figures 4C</xref>, <xref ref-type="fig" rid="F5">5B</xref>, lower row). This mid-occipito-parietal signature is characterized by a current sink deflection in response to both optimal and suboptimal robot actions, followed by a short time interval of current source with a peak at 550 ms. Afterwards, another directional switch from source to sink is observed, occurring around 600 ms in the decoding and 700 ms in the clustering analysis. In both analyses, this sustained current sink in late time intervals after stimulus onset was more pronounced when observing suboptimal compared to optimal robot performance.</p>
<p>These time windows and sustained current sinks for suboptimal robot actions likely reflect a combination of a delayed oERN and an interaction ERN/N400 (Chavarriaga et al., <xref ref-type="bibr" rid="B10">2010</xref>; Ferrez and Mill&#x000E1;n, <xref ref-type="bibr" rid="B18">2005</xref>; Ferrez and Mill&#x000E1;n, <xref ref-type="bibr" rid="B19">2008</xref>; Somon et al., <xref ref-type="bibr" rid="B64">2017</xref>). The delay in evoked response intervals is potentially attributable to the erroneous robot action occurring shortly after the video onset. Notably, the N400 has previously been observed in non-linguistic contexts over parietal areas in response to unexpected motor sequences (Balconi and Vitaloni, <xref ref-type="bibr" rid="B4">2014</xref>). Both ERP components are amplified when observing erroneous, suboptimal actions. In their sustained form, they may reflect ongoing quality evaluation, signaling deviations from predicted trajectories and expected movements, thereby indicating suboptimal performance.</p>
<p>To summarize, through temporal decoding and clustering analyses, we identified three consistent spatiotemporal signatures that distinguish the evaluation of optimal and suboptimal robot performance. A left fronto-temporal signature, characterized by an enhanced current source resembling ERP components such as the P300, LPP, and P600, suggests increased attentional allocation and sustained evaluation of suboptimal robot actions. Furthermore, right frontal and mid-occipito-parietal signatures displayed amplified current sinks in response to suboptimal actions, suggesting prediction-based processing of deviations and errors, consistent with the oERN and interaction-based ERN/N400.</p>
</sec>
</sec>
<sec>
<title>4.2 Effects of task load and video stimulus material</title>
<p>The identified discriminative evoked signatures reflect a sustained, step-by-step evaluation of robot actions from continuous video excerpts. They persisted even after deviations from expected (optimal) performance were detected. Consequently, optimal robot actions were characterized by the absence of deviations throughout the entire video. In our specific task, participants were required to monitor and mentally assess multiple aspects of the action, including the position, length, and pressure of the intended cut. Thus, even if the robot correctly reached the target position, participants needed to verify that all criteria were met. Accordingly, it is noteworthy that the continuous video stimulus, coupled with the ongoing monitoring and evaluation of robot actions in an applied scenario, likely imposed a substantial perceptual and cognitive load on participants.</p>
<p>This task-induced load may have reduced differences in the amplitude of evoked responses between observed suboptimal and optimal actions (Somon et al., <xref ref-type="bibr" rid="B64">2017</xref>, <xref ref-type="bibr" rid="B65">2019</xref>; see Endrass et al., <xref ref-type="bibr" rid="B14">2012a</xref>,<xref ref-type="bibr" rid="B15">b</xref> for load effects during self-monitoring). It could explain the lack of modulated amplitudes in early components during the observation of suboptimal actions. In addition, although the task instructions aimed to emphasize the importance of errors and the potentially serious consequences of mistakes in laparoscopic surgery, the absence of a modulated Pe component in response to suboptimal robot actions may be due to the low (self-related) relevance of negative outcomes for participants in a passive observation role (Chavarriaga et al., <xref ref-type="bibr" rid="B10">2010</xref>; Somon et al., <xref ref-type="bibr" rid="B64">2017</xref>).</p>
<p>To conclude, given that task-induced cognitive load on the observer may be inherently present and unavoidable in real-world applications, further investigation is warranted to ensure ecologically valid and robust correlates of performance assessment.</p>
</sec>
<sec>
<title>4.3 Limitations and future directions</title>
<p>The study offers valuable insights into the neural mechanisms underlying robot performance evaluation and error monitoring in a near-naturalistic laparoscopic surgical training context. However, several limitations must be considered.</p>
<p>To aid non-medical participants in judging whether the robot&#x00027;s actions were optimal or suboptimal &#x02013; particularly for subtle criteria such as applied pressure &#x02013; LEDs were placed along the tissue to be cut. While this LED feedback during suboptimal actions (i.e., LEDs remained lit) vs. optimal actions (LEDs turned off) was essential for participants&#x00027; understanding and engagement, it may have influenced evoked responses, introducing a perceptual component to the task and complicating comparisons with previous studies. Future research on passive BCIs for robotic training should explore alternative guidance methods and additional information sources, such as haptic feedback representing applied pressure, to reduce reliance on perceptual feedback and better isolate evaluative processes.</p>
<p>Our findings revealed a lateralised error-related brain signature, marked by enhanced left fronto-temporal current sources and right frontal current sinks. This lateralisation has not been previously reported and may represent a novel correlate of performance assessment during robot action monitoring in applied scenarios. Given the rather small sample size in this study, further research should confirm the robustness and replicability of the identified spatiotemporal brain signatures linked to robot action evaluation in realistic scenarios.</p>
<p>Moreover, including medical students or even physicians would enable an assessment of the impact of expert knowledge. Future research should also explore the potential effects of participant fatigue or fluctuating task engagement throughout prolonged video sequences, as this may introduce variability in evoked responses. Addressing these factors could further clarify the robustness of our identified brain signatures.</p>
<p>Precise onset detection of suboptimal actions is challenging in near-naturalistic experiments and is often shaped by subjective observer criteria. Therefore, eye-related measures, such as fixations (Simola et al., <xref ref-type="bibr" rid="B63">2015</xref>; Ladouce et al., <xref ref-type="bibr" rid="B36">2022</xref>) or blinks (Alyan et al., <xref ref-type="bibr" rid="B1">2023</xref>), may provide an ecologically valid approach to further investigate attentional shifts toward significant deviations and the associated evaluative processing.</p>
<p>Combining deviation onset detection through eye-based approaches with findings from temporal and single-trial decoding establishes a foundation for developing passive BCIs to reliably label robot actions for reinforcement-learning-based training paradigms. In our study, the most informative signals were extracted from late evoked responses linked to attentional or evaluative processes. Consequently, BCI algorithms should focus on these late evaluative intervals (beyond 1,000 ms after eye-based deviation detection) to enhance decoding accuracy. However, it is important to note that while late evoked responses are suitable for training robots, the delay of a few hundred milliseconds following error detection may restrict their effectiveness for real-time interventions. Such real-time interventions could provide a safeguard in robot-assisted surgeries. To overcome this limitation, future studies could investigate a multisensory decoding approach that integrates electrophysiological, peripheral-physiological, and eye-based data, combined with a conservative stop criterion (high sensitivity/true positive rate), to develop a system capable of intervening and eliminating suboptimal robotic actions in real-life surgical scenarios.</p>
<p>The next steps toward BCI-assisted robot training in real-world settings include replicating these findings in (a) dual-task paradigms that simulate collaborative scenarios with individual and shared tasks, and (b) using mobile, dry EEG systems suited for unobtrusive, everyday measurements (e.g., Vukeli&#x00107; et al., <xref ref-type="bibr" rid="B72">2023</xref>).</p>
</sec>
</sec>
<sec sec-type="conclusions" id="s5">
<title>5 Conclusion</title>
<p>Our study reveals three robust spatiotemporal brain signatures that distinguish between evaluated optimal and suboptimal robotic actions during laparoscopic training. The findings emphasize the critical role of late-stage evaluative brain processes in detecting deviations in robotic performance. Specifically, the left fronto-temporal signature, associated with ERP components such as the P300, LPP, and P600, indicates sustained attentional and evaluative engagement in response to suboptimal actions. Additionally, amplified current sinks in right frontal and mid-occipito-parietal regions, consistent with error-related responses like the oERN and ERN/N400, suggest prediction-based processing of errors and deviations.</p>
<p>By delineating distinct electrophysiological patterns, our results deepen the understanding of the neural mechanisms underpinning mental assessments of robotic performance in near-naturalistic scenarios. These insights hold promise for advancing passive BCIs capable of facilitating real-time, automated evaluations in robotic training and collaborative surgical contexts. The research highlights the role of late-stage electrophysiological responses, linked to attentional and evaluative processes, in detecting significant deviations from optimal robotic actions. Integrating these findings into reinforcement-learning-based training frameworks could reduce reliance on explicit feedback from human instructors, enabling more efficient and intuitive human-centered robotic training systems.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The datasets presented in this study can be found in an online OSF repository and are accessible upon request at <ext-link ext-link-type="uri" xlink:href="https://osf.io/6ndsv/">https://osf.io/6ndsv/</ext-link>.</p>
</sec>
<sec sec-type="ethics-statement" id="s7">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the local ethics committee of the University of T&#x000FC;bingen (ID: 827/2020BO1). The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec sec-type="author-contributions" id="s8">
<title>Author contributions</title>
<p>KL: Conceptualization, Data curation, Formal analysis, Funding acquisition, Investigation, Methodology, Project administration, Resources, Software, Supervision, Validation, Visualization, Writing &#x02013; original draft, Writing &#x02013; review &#x00026; editing. JR: Data curation, Investigation, Validation, Writing &#x02013; original draft, Writing &#x02013; review &#x00026; editing. LK: Conceptualization, Funding acquisition, Methodology, Resources, Software, Validation, Visualization, Writing &#x02013; review &#x00026; editing. FM-U: Funding acquisition, Resources, Supervision, Writing &#x02013; review &#x00026; editing. MV: Conceptualization, Funding acquisition, Methodology, Resources, Writing &#x02013; review &#x00026; editing.</p>
</sec>
<sec sec-type="funding-information" id="s9">
<title>Funding</title>
<p>The author(s) declare financial support was received for the research, authorship, and/or publication of this article. The research was supported by the Vector Stiftung Baden-W&#x000FC;rttemberg (MINT-Innovationen P-2020-0116) and Ministry of Economic Affairs, Labor, and Tourism Baden-W&#x000FC;rttemberg (KI-Fortschrittszentrum Lernende Systeme und Kognitive Robotik).</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
<p>The author(s) declared that they were an editorial board member of Frontiers, at the time of submission. This had no impact on the peer review process and the final decision.</p>
</sec>
<sec sec-type="ai-statement" id="s10">
<title>Generative AI statement</title>
<p>The author(s) declare that no Gen AI was used in the creation of this manuscript.</p>
</sec>
<sec sec-type="disclaimer" id="s11">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec><sec sec-type="supplementary-material" id="s12">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fnrgo.2025.1535799/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fnrgo.2025.1535799/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Video_1.mp4" id="SM1" mimetype="video/mp4" xmlns:xlink="http://www.w3.org/1999/xlink"/>
<supplementary-material xlink:href="Data_Sheet_1.pdf" id="SM2" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Alyan</surname> <given-names>E.</given-names></name> <name><surname>Wascher</surname> <given-names>E.</given-names></name> <name><surname>Arnau</surname> <given-names>S.</given-names></name> <name><surname>Kaesemann</surname> <given-names>R.</given-names></name> <name><surname>Reiser</surname> <given-names>J. E.</given-names></name></person-group> (<year>2023</year>). <article-title>Operator state in a workplace simulation modulates eye-blink related EEG activity</article-title>. <source>IEEE Trans. Neural Syst. Rehabil. Eng</source>. <volume>31</volume>, <fpage>1167</fpage>&#x02013;<lpage>1179</lpage>. <pub-id pub-id-type="doi">10.1109/TNSRE.2023.3241962</pub-id><pub-id pub-id-type="pmid">37022454</pub-id></citation></ref>
<ref id="B2">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Appriou</surname> <given-names>A.</given-names></name> <name><surname>Cichocki</surname> <given-names>A.</given-names></name> <name><surname>Lotte</surname> <given-names>F.</given-names></name></person-group> (<year>2020</year>). <article-title>Modern machine-learning algorithms: for classifying cognitive and affective states from electroencephalography signals</article-title>. <source>IEEE Syst. Man Cybern. Mag</source>. <volume>6</volume>, <fpage>29</fpage>&#x02013;<lpage>38</lpage>. <pub-id pub-id-type="doi">10.1109/MSMC.2020.2968638</pub-id></citation>
</ref>
<ref id="B3">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Aric&#x000F2;</surname> <given-names>P.</given-names></name> <name><surname>Borghini</surname> <given-names>G.</given-names></name> <name><surname>Di Flumeri</surname> <given-names>G.</given-names></name> <name><surname>Sciaraffa</surname> <given-names>N.</given-names></name> <name><surname>Babiloni</surname> <given-names>F.</given-names></name></person-group> (<year>2018</year>). <article-title>Passive BCI beyond the lab: current trends and future directions</article-title>. <source>Physiol. Meas</source>. <volume>39</volume>:<fpage>08TR02</fpage>. <pub-id pub-id-type="doi">10.1088/1361-6579/aad57e</pub-id><pub-id pub-id-type="pmid">30039806</pub-id></citation></ref>
<ref id="B4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Balconi</surname> <given-names>M.</given-names></name> <name><surname>Vitaloni</surname> <given-names>S.</given-names></name></person-group> (<year>2014</year>). <article-title>N400 effect when a semantic anomaly is detected in action representation. a source localization analysis</article-title>. <source>J. Clin. Neurophysiol</source>. <volume>31</volume>, <fpage>58</fpage>&#x02013;<lpage>64</lpage>. <pub-id pub-id-type="doi">10.1097/WNP.0000000000000017</pub-id><pub-id pub-id-type="pmid">24492448</pub-id></citation></ref>
<ref id="B5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Barachant</surname> <given-names>A.</given-names></name> <name><surname>Bonnet</surname> <given-names>S.</given-names></name> <name><surname>Congedo</surname> <given-names>M.</given-names></name> <name><surname>Jutten</surname> <given-names>C.</given-names></name></person-group> (<year>2011</year>). <article-title>Multiclass brain-computer interface classification by Riemannian geometry</article-title>. <source>IEEE Trans. Biomed. Eng</source>. <volume>59</volume>, <fpage>920</fpage>&#x02013;<lpage>928</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2011.2172210</pub-id><pub-id pub-id-type="pmid">22010143</pub-id></citation></ref>
<ref id="B6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bartholow</surname> <given-names>B. D.</given-names></name> <name><surname>Pearson</surname> <given-names>M. A.</given-names></name> <name><surname>Dickter</surname> <given-names>C. L.</given-names></name> <name><surname>Sher</surname> <given-names>K. J.</given-names></name> <name><surname>Fabiani</surname> <given-names>M.</given-names></name> <name><surname>Gratton</surname> <given-names>G.</given-names></name> <etal/></person-group>. (<year>2005</year>). <article-title>Strategic control and medial frontal negativity: beyond errors and response conflict</article-title>. <source>Psychophysiology</source> <volume>42</volume>, <fpage>33</fpage>&#x02013;<lpage>42</lpage>. <pub-id pub-id-type="doi">10.1111/j.1469-8986.2005.00258.x</pub-id><pub-id pub-id-type="pmid">15720579</pub-id></citation></ref>
<ref id="B7">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bentin</surname> <given-names>S.</given-names></name> <name><surname>Mouchetant-Rostaing</surname> <given-names>Y.</given-names></name> <name><surname>Giard</surname> <given-names>M.-H.</given-names></name> <name><surname>Echallier</surname> <given-names>J.-F.</given-names></name> <name><surname>Pernier</surname> <given-names>J.</given-names></name></person-group> (<year>1999</year>). <article-title>ERP manifestations of processing printed words at different psycholinguistic levels: time course and scalp distribution</article-title>. <source>J. Cogn. Neurosci</source>. <volume>11</volume>, <fpage>235</fpage>&#x02013;<lpage>260</lpage>. <pub-id pub-id-type="doi">10.1162/089892999563373</pub-id><pub-id pub-id-type="pmid">10402254</pub-id></citation></ref>
<ref id="B8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Botvinick</surname> <given-names>M. M.</given-names></name> <name><surname>Braver</surname> <given-names>T. S.</given-names></name> <name><surname>Barch</surname> <given-names>D. M.</given-names></name> <name><surname>Carter</surname> <given-names>C. S.</given-names></name> <name><surname>Cohen</surname> <given-names>J. D.</given-names></name></person-group> (<year>2001</year>). <article-title>Conflict monitoring and cognitive control</article-title>. <source>Psychol. Rev</source>. <volume>108</volume>:<fpage>624</fpage>. <pub-id pub-id-type="doi">10.1037/0033-295X.108.3.624</pub-id><pub-id pub-id-type="pmid">11488380</pub-id></citation></ref>
<ref id="B9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chaumon</surname> <given-names>M.</given-names></name> <name><surname>Bishop</surname> <given-names>D. V.</given-names></name> <name><surname>Busch</surname> <given-names>N. A.</given-names></name></person-group> (<year>2015</year>). <article-title>A practical guide to the selection of independent components of the electroencephalogram for artifact correction</article-title>. <source>J. Neurosci. Methods</source> <volume>250</volume>, <fpage>47</fpage>&#x02013;<lpage>63</lpage>. <pub-id pub-id-type="doi">10.1016/j.jneumeth.2015.02.025</pub-id><pub-id pub-id-type="pmid">25791012</pub-id></citation></ref>
<ref id="B10">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Chavarriaga</surname> <given-names>R.</given-names></name> <name><surname>Biasiucci</surname> <given-names>A.</given-names></name> <name><surname>F&#x000F6;rster</surname> <given-names>K.</given-names></name> <name><surname>Roggen</surname> <given-names>D.</given-names></name> <name><surname>Tr&#x000F6;ster</surname> <given-names>G.</given-names></name> <name><surname>Mill&#x000E1;n</surname> <given-names>J. R.</given-names></name></person-group> (<year>2010</year>). <article-title>&#x0201C;Adaptation of hybrid human-computer interaction systems using EEG error-related potentials,&#x0201D;</article-title> in <source>2010 Annual International Conference of the IEEE Engineering in Medicine and Biology</source> (<publisher-loc>Buenos Aires</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>4226</fpage>&#x02013;<lpage>4229</lpage>. <pub-id pub-id-type="doi">10.1109/IEMBS.2010.5627376</pub-id><pub-id pub-id-type="pmid">21096899</pub-id></citation></ref>
<ref id="B11">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Congedo</surname> <given-names>M.</given-names></name> <name><surname>Barachant</surname> <given-names>A.</given-names></name> <name><surname>Bhatia</surname> <given-names>R.</given-names></name></person-group> (<year>2017</year>). <article-title>Riemannian geometry for EEG-based brain-computer interfaces; a primer and a review</article-title>. <source>Brain-Comput. Interfaces</source> <volume>4</volume>, <fpage>155</fpage>&#x02013;<lpage>174</lpage>. <pub-id pub-id-type="doi">10.1080/2326263X.2017.1297192</pub-id></citation>
</ref>
<ref id="B12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cumming</surname> <given-names>G.</given-names></name></person-group> (<year>2014</year>). <article-title>The new statistics: why and how</article-title>. <source>Psychol. Sci</source>. <volume>25</volume>, <fpage>7</fpage>&#x02013;<lpage>29</lpage>. <pub-id pub-id-type="doi">10.1177/0956797613504966</pub-id><pub-id pub-id-type="pmid">24220629</pub-id></citation></ref>
<ref id="B13">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ehrlich</surname> <given-names>S. K.</given-names></name> <name><surname>Cheng</surname> <given-names>G.</given-names></name></person-group> (<year>2019</year>). <article-title>A feasibility study for validating robot actions using EEG-based error-related potentials</article-title>. <source>Int. J. Soc. Robot</source>. <volume>11</volume>, <fpage>271</fpage>&#x02013;<lpage>283</lpage>. <pub-id pub-id-type="doi">10.1007/s12369-018-0501-8</pub-id><pub-id pub-id-type="pmid">36369025</pub-id></citation></ref>
<ref id="B14">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Endrass</surname> <given-names>T.</given-names></name> <name><surname>Klawohn</surname> <given-names>J.</given-names></name> <name><surname>Gruetzmann</surname> <given-names>R.</given-names></name> <name><surname>Ischebeck</surname> <given-names>M.</given-names></name> <name><surname>Kathmann</surname> <given-names>N.</given-names></name></person-group> (<year>2012a</year>). <article-title>Response-related negativities following correct and incorrect responses: evidence from a temporospatial principal component analysis</article-title>. <source>Psychophysiology</source> <volume>49</volume>, <fpage>733</fpage>&#x02013;<lpage>743</lpage>. <pub-id pub-id-type="doi">10.1111/j.1469-8986.2012.01365.x</pub-id><pub-id pub-id-type="pmid">22417070</pub-id></citation></ref>
<ref id="B15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Endrass</surname> <given-names>T.</given-names></name> <name><surname>Klawohn</surname> <given-names>J.</given-names></name> <name><surname>Preuss</surname> <given-names>J.</given-names></name> <name><surname>Kathmann</surname> <given-names>N.</given-names></name></person-group> (<year>2012b</year>). <article-title>Temporospatial dissociation of pe subcomponents for perceived and unperceived errors</article-title>. <source>Front. Hum. Neurosci</source>. <volume>6</volume>:<fpage>178</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2012.00178</pub-id><pub-id pub-id-type="pmid">22737113</pub-id></citation></ref>
<ref id="B16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Falkenstein</surname> <given-names>M.</given-names></name> <name><surname>Hohnsbein</surname> <given-names>J.</given-names></name> <name><surname>Hoormann</surname> <given-names>J.</given-names></name> <name><surname>Blanke</surname> <given-names>L.</given-names></name></person-group> (<year>1991</year>). <article-title>Effects of crossmodal divided attention on late ERP components. ii. error processing in choice reaction tasks</article-title>. <source>Electroencephalogr. Clin. Neurophysiol</source>. <volume>78</volume>, <fpage>447</fpage>&#x02013;<lpage>455</lpage>. <pub-id pub-id-type="doi">10.1016/0013-4694(91)90062-9</pub-id><pub-id pub-id-type="pmid">1712280</pub-id></citation></ref>
<ref id="B17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Falkenstein</surname> <given-names>M.</given-names></name> <name><surname>Hoormann</surname> <given-names>J.</given-names></name> <name><surname>Christ</surname> <given-names>S.</given-names></name> <name><surname>Hohnsbein</surname> <given-names>J.</given-names></name></person-group> (<year>2000</year>). <article-title>ERP components on reaction errors and their functional significance: a tutorial</article-title>. <source>Biol. Psychol</source>. <volume>51</volume>, <fpage>87</fpage>&#x02013;<lpage>107</lpage>. <pub-id pub-id-type="doi">10.1016/S0301-0511(99)00031-9</pub-id><pub-id pub-id-type="pmid">10686361</pub-id></citation></ref>
<ref id="B18">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Ferrez</surname> <given-names>P. W.</given-names></name> <name><surname>Mill&#x000E1;n</surname> <given-names>J. D. R.</given-names></name></person-group> (<year>2005</year>). <article-title>&#x0201C;You are wrong! automatic detection of interaction errors from brain waves,&#x0201D;</article-title> in <source>Proceedings of the 19th International Joint Conference on Artificial Intelligence, IJCAI&#x00027;05</source> (<publisher-loc>San Francisco, CA</publisher-loc>: <publisher-name>Morgan Kaufmann Publishers Inc</publisher-name>), <fpage>1413</fpage>&#x02013;<lpage>1418</lpage>.</citation>
</ref>
<ref id="B19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ferrez</surname> <given-names>P. W.</given-names></name> <name><surname>Mill&#x000E1;n</surname> <given-names>J. R.</given-names></name></person-group> (<year>2008</year>). <article-title>Error-related EEG potentials generated during simulated brain-computer interaction</article-title>. <source>IEEE Trans. Biomed. Eng</source>. <volume>55</volume>, <fpage>923</fpage>&#x02013;<lpage>929</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2007.908083</pub-id><pub-id pub-id-type="pmid">18334383</pub-id></citation></ref>
<ref id="B20">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gehring</surname> <given-names>W. J.</given-names></name> <name><surname>Goss</surname> <given-names>B.</given-names></name> <name><surname>Coles</surname> <given-names>M. G.</given-names></name> <name><surname>Meyer</surname> <given-names>D. E.</given-names></name> <name><surname>Donchin</surname> <given-names>E.</given-names></name></person-group> (<year>1993</year>). <article-title>A neural system for error detection and compensation</article-title>. <source>Psychol. Sci</source>. <volume>4</volume>, <fpage>385</fpage>&#x02013;<lpage>390</lpage>. <pub-id pub-id-type="doi">10.1111/j.1467-9280.1993.tb00586.x</pub-id></citation>
</ref>
<ref id="B21">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gramfort</surname> <given-names>A.</given-names></name> <name><surname>Luessi</surname> <given-names>M.</given-names></name> <name><surname>Larson</surname> <given-names>E.</given-names></name> <name><surname>Engemann</surname> <given-names>D. A.</given-names></name> <name><surname>Strohmeier</surname> <given-names>D.</given-names></name> <name><surname>Brodbeck</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>MNE software for processing meg and EEG data</article-title>. <source>Neuroimage</source> <volume>86</volume>, <fpage>446</fpage>&#x02013;<lpage>460</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2013.10.027</pub-id><pub-id pub-id-type="pmid">24161808</pub-id></citation></ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Groppe</surname> <given-names>D. M.</given-names></name> <name><surname>Urbach</surname> <given-names>T. P.</given-names></name> <name><surname>Kutas</surname> <given-names>M.</given-names></name></person-group> (<year>2011</year>). <article-title>Mass univariate analysis of event-related brain potentials/fields I: a critical tutorial review</article-title>. <source>Psychophysiology</source> <volume>48</volume>, <fpage>1711</fpage>&#x02013;<lpage>1725</lpage>. <pub-id pub-id-type="doi">10.1111/j.1469-8986.2011.01273.x</pub-id><pub-id pub-id-type="pmid">21895683</pub-id></citation></ref>
<ref id="B23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hajcak</surname> <given-names>G.</given-names></name> <name><surname>Foti</surname> <given-names>D.</given-names></name></person-group> (<year>2020</year>). <article-title>Significance?&#x02026; Significance! empirical, methodological, and theoretical connections between the late positive potential and p300 as neural responses to stimulus significance: an integrative review</article-title>. <source>Psychophysiology</source> <volume>57</volume>:<fpage>e13570</fpage>. <pub-id pub-id-type="doi">10.1111/psyp.13570</pub-id><pub-id pub-id-type="pmid">32243623</pub-id></citation></ref>
<ref id="B24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Haufe</surname> <given-names>S.</given-names></name> <name><surname>Meinecke</surname> <given-names>F.</given-names></name> <name><surname>G&#x000F6;rgen</surname> <given-names>K.</given-names></name> <name><surname>D&#x000E4;hne</surname> <given-names>S.</given-names></name> <name><surname>Haynes</surname> <given-names>J.-D.</given-names></name> <name><surname>Blankertz</surname> <given-names>B.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>On the interpretation of weight vectors of linear models in multivariate neuroimaging</article-title>. <source>Neuroimage</source> <volume>87</volume>, <fpage>96</fpage>&#x02013;<lpage>110</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2013.10.067</pub-id><pub-id pub-id-type="pmid">24239590</pub-id></citation></ref>
<ref id="B25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hipp</surname> <given-names>J. F.</given-names></name> <name><surname>Siegel</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Dissociating neuronal gamma-band activity from cranial and ocular muscle activity in EEG</article-title>. <source>Front. Hum. Neurosci</source>. <volume>7</volume>:<fpage>338</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2013.00338</pub-id><pub-id pub-id-type="pmid">23847508</pub-id></citation></ref>
<ref id="B26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Holdgraf</surname> <given-names>C. R.</given-names></name> <name><surname>Rieger</surname> <given-names>J. W.</given-names></name> <name><surname>Micheli</surname> <given-names>C.</given-names></name> <name><surname>Martin</surname> <given-names>S.</given-names></name> <name><surname>Knight</surname> <given-names>R. T.</given-names></name> <name><surname>Theunissen</surname> <given-names>F. E.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Encoding and decoding models in cognitive electrophysiology</article-title>. <source>Front. Syst. Neurosci</source>. <volume>11</volume>:<fpage>61</fpage>. <pub-id pub-id-type="doi">10.3389/fnsys.2017.00061</pub-id><pub-id pub-id-type="pmid">29018336</pub-id></citation></ref>
<ref id="B27">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Iturrate</surname> <given-names>I.</given-names></name> <name><surname>Chavarriaga</surname> <given-names>R.</given-names></name> <name><surname>Montesano</surname> <given-names>L.</given-names></name> <name><surname>Minguez</surname> <given-names>J.</given-names></name> <name><surname>Mill&#x000E1;n</surname> <given-names>J. R.</given-names></name></person-group> (<year>2015</year>). <article-title>Teaching brain-machine interfaces as an alternative paradigm to neuroprosthetics control</article-title>. <source>Sci. Rep</source>. <volume>5</volume>:<fpage>13893</fpage>. <pub-id pub-id-type="doi">10.1038/srep13893</pub-id><pub-id pub-id-type="pmid">26354145</pub-id></citation></ref>
<ref id="B28">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Iturrate</surname> <given-names>I.</given-names></name> <name><surname>Montesano</surname> <given-names>L.</given-names></name> <name><surname>Minguez</surname> <given-names>J.</given-names></name></person-group> (<year>2010</year>). <article-title>&#x0201C;Robot reinforcement learning using EEG-based reward signals,&#x0201D;</article-title> in <source>2010 IEEE International Conference on Robotics and Automation</source> (<publisher-loc>Anchorage, AK</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>4822</fpage>&#x02013;<lpage>4829</lpage>. <pub-id pub-id-type="doi">10.1109/ROBOT.2010.5509734</pub-id><pub-id pub-id-type="pmid">27534393</pub-id></citation></ref>
<ref id="B29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kayser</surname> <given-names>J.</given-names></name> <name><surname>Tenke</surname> <given-names>C. E.</given-names></name></person-group> (<year>2015</year>). <article-title>On the benefits of using surface Laplacian (current source density) methodology in electrophysiology</article-title>. <source>Int. J. Psychophysiol</source>. <volume>97</volume>, <fpage>171</fpage>. <pub-id pub-id-type="doi">10.1016/j.ijpsycho.2015.06.001</pub-id><pub-id pub-id-type="pmid">26071227</pub-id></citation></ref>
<ref id="B30">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>S. K.</given-names></name> <name><surname>Kirchner</surname> <given-names>E. A.</given-names></name> <name><surname>Kirchner</surname> <given-names>F.</given-names></name></person-group> (<year>2020</year>). <article-title>&#x0201C;Flexible online adaptation of learning strategy using EEG-based reinforcement signals in real-world robotic applications,&#x0201D;</article-title> in <source>2020 IEEE International Conference on Robotics and Automation (ICRA)</source> (<publisher-loc>Paris</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>4885</fpage>&#x02013;<lpage>4891</lpage>. <pub-id pub-id-type="doi">10.1109/ICRA40945.2020.9197538</pub-id></citation>
</ref>
<ref id="B31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>S. K.</given-names></name> <name><surname>Kirchner</surname> <given-names>E. A.</given-names></name> <name><surname>Stefes</surname> <given-names>A.</given-names></name> <name><surname>Kirchner</surname> <given-names>F.</given-names></name></person-group> (<year>2017</year>). <article-title>Intrinsic interactive reinforcement learning-using error-related potentials for real world human-robot interaction</article-title>. <source>Sci. Rep</source>. <volume>7</volume>, <fpage>1</fpage>&#x02013;<lpage>16</lpage>. <pub-id pub-id-type="doi">10.1038/s41598-017-17682-7</pub-id><pub-id pub-id-type="pmid">29242555</pub-id></citation></ref>
<ref id="B32">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Koban</surname> <given-names>L.</given-names></name> <name><surname>Pourtois</surname> <given-names>G.</given-names></name> <name><surname>Vocat</surname> <given-names>R.</given-names></name> <name><surname>Vuilleumier</surname> <given-names>P.</given-names></name></person-group> (<year>2010</year>). <article-title>When your errors make me lose or win: event-related potentials to observed errors of cooperators and competitors</article-title>. <source>Soc. Neurosci</source>. <volume>5</volume>, <fpage>360</fpage>&#x02013;<lpage>374</lpage>. <pub-id pub-id-type="doi">10.1080/17470911003651547</pub-id><pub-id pub-id-type="pmid">20349391</pub-id></citation></ref>
<ref id="B33">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kreilinger</surname> <given-names>A.</given-names></name> <name><surname>Neuper</surname> <given-names>C.</given-names></name> <name><surname>M&#x000FC;ller-Putz</surname> <given-names>G. R.</given-names></name></person-group> (<year>2012</year>). <article-title>Error potential detection during continuous movement of an artificial arm controlled by brain-computer interface</article-title>. <source>Med. Biol. Eng. Comput</source>. <volume>50</volume>, <fpage>223</fpage>&#x02013;<lpage>230</lpage>. <pub-id pub-id-type="doi">10.1007/s11517-011-0858-4</pub-id><pub-id pub-id-type="pmid">22210463</pub-id></citation></ref>
<ref id="B34">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kriegeskorte</surname> <given-names>N.</given-names></name> <name><surname>Douglas</surname> <given-names>P. K.</given-names></name></person-group> (<year>2019</year>). <article-title>Interpreting encoding and decoding models</article-title>. <source>Curr. Opin. Neurobiol</source>. <volume>55</volume>, <fpage>167</fpage>&#x02013;<lpage>179</lpage>. <pub-id pub-id-type="doi">10.1016/j.conb.2019.04.002</pub-id><pub-id pub-id-type="pmid">31039527</pub-id></citation></ref>
<ref id="B35">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kutas</surname> <given-names>M.</given-names></name> <name><surname>Hillyard</surname> <given-names>S. A.</given-names></name></person-group> (<year>1980</year>). <article-title>Reading senseless sentences: brain potentials reflect semantic incongruity</article-title>. <source>Science</source> <volume>207</volume>, <fpage>203</fpage>&#x02013;<lpage>205</lpage>. <pub-id pub-id-type="doi">10.1126/science.7350657</pub-id><pub-id pub-id-type="pmid">7350657</pub-id></citation></ref>
<ref id="B36">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ladouce</surname> <given-names>S.</given-names></name> <name><surname>Mustile</surname> <given-names>M.</given-names></name> <name><surname>Ietswaart</surname> <given-names>M.</given-names></name> <name><surname>Dehais</surname> <given-names>F.</given-names></name></person-group> (<year>2022</year>). <article-title>Capturing cognitive events embedded in the real world using mobile electroencephalography and eye-tracking</article-title>. <source>J. Cogn. Neurosci</source>. <volume>34</volume>, <fpage>2237</fpage>&#x02013;<lpage>2255</lpage>. <pub-id pub-id-type="doi">10.1162/jocn_a_01903</pub-id><pub-id pub-id-type="pmid">36007068</pub-id></citation></ref>
<ref id="B37">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>T.-W.</given-names></name> <name><surname>Girolami</surname> <given-names>M.</given-names></name> <name><surname>Sejnowski</surname> <given-names>T. J.</given-names></name></person-group> (<year>1999</year>). <article-title>Independent component analysis using an extended infomax algorithm for mixed subgaussian and supergaussian sources</article-title>. <source>Neural Comput</source>. <volume>11</volume>, <fpage>417</fpage>&#x02013;<lpage>441</lpage>. <pub-id pub-id-type="doi">10.1162/089976699300016719</pub-id><pub-id pub-id-type="pmid">9950738</pub-id></citation></ref>
<ref id="B38">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lotte</surname> <given-names>F.</given-names></name> <name><surname>Bougrain</surname> <given-names>L.</given-names></name> <name><surname>Cichocki</surname> <given-names>A.</given-names></name> <name><surname>Clerc</surname> <given-names>M.</given-names></name> <name><surname>Congedo</surname> <given-names>M.</given-names></name> <name><surname>Rakotomamonjy</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>A review of classification algorithms for EEG-based brain-computer interfaces: a 10 year update</article-title>. <source>J. Neural Eng</source>. <volume>15</volume>:<fpage>031005</fpage>. <pub-id pub-id-type="doi">10.1088/1741-2552/aab2f2</pub-id><pub-id pub-id-type="pmid">29488902</pub-id></citation></ref>
<ref id="B39">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Luo</surname> <given-names>T.-j.</given-names></name> <name><surname>Fan</surname> <given-names>Y.-c.</given-names></name> <name><surname>Lv</surname> <given-names>J.-t.</given-names></name> <name><surname>Zhou</surname> <given-names>C.</given-names></name></person-group> (<year>2018</year>). <article-title>&#x0201C;Deep reinforcement learning from error-related potentials via an EEG-based brain-computer interface,&#x0201D;</article-title> in <source>2018 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)</source> (<publisher-loc>Madrid</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>697</fpage>&#x02013;<lpage>701</lpage>. <pub-id pub-id-type="doi">10.1109/BIBM.2018.8621183</pub-id></citation>
</ref>
<ref id="B40">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Maris</surname> <given-names>E.</given-names></name> <name><surname>Oostenveld</surname> <given-names>R.</given-names></name></person-group> (<year>2007</year>). <article-title>Nonparametric statistical testing of EEG- and MEG-data</article-title>. <source>J. Neurosci. Methods</source> <volume>164</volume>, <fpage>177</fpage>&#x02013;<lpage>190</lpage>. <pub-id pub-id-type="doi">10.1016/j.jneumeth.2007.03.024</pub-id><pub-id pub-id-type="pmid">17517438</pub-id></citation></ref>
<ref id="B41">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Marsicano</surname> <given-names>G.</given-names></name> <name><surname>Bertini</surname> <given-names>C.</given-names></name> <name><surname>Ronconi</surname> <given-names>L.</given-names></name></person-group> (<year>2024</year>). <article-title>Decoding cognition in neurodevelopmental, psychiatric and neurological conditions with multivariate pattern analysis of EEG data</article-title>. <source>Neurosci. Biobehav. Rev</source>. <volume>164</volume>:<fpage>105795</fpage>. <pub-id pub-id-type="doi">10.1016/j.neubiorev.2024.105795</pub-id><pub-id pub-id-type="pmid">38977116</pub-id></citation></ref>
<ref id="B42">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Moustris</surname> <given-names>G. P.</given-names></name> <name><surname>Hiridis</surname> <given-names>S. C.</given-names></name> <name><surname>Deliparaschos</surname> <given-names>K. M.</given-names></name> <name><surname>Konstantinidis</surname> <given-names>K. M.</given-names></name></person-group> (<year>2011</year>). <article-title>Evolution of autonomous and semi-autonomous robotic surgical systems: a review of the literature</article-title>. <source>Int. J. Med. Robot. Comput. Assist. Surg</source>. <volume>7</volume>, <fpage>375</fpage>&#x02013;<lpage>392</lpage>. <pub-id pub-id-type="doi">10.1002/rcs.408</pub-id><pub-id pub-id-type="pmid">21815238</pub-id></citation></ref>
<ref id="B43">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Nguyen</surname> <given-names>N. D.</given-names></name> <name><surname>Nguyen</surname> <given-names>T.</given-names></name> <name><surname>Nahavandi</surname> <given-names>S.</given-names></name> <name><surname>Bhatti</surname> <given-names>A.</given-names></name> <name><surname>Guest</surname> <given-names>G.</given-names></name></person-group> (<year>2019</year>). <article-title>&#x0201C;Manipulating soft tissues by deep reinforcement learning for autonomous robotic surgery,&#x0201D;</article-title> in <source>2019 IEEE International Systems Conference (SysCon)</source> (<publisher-loc>Orlando, FL</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x02013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1109/SYSCON.2019.8836924</pub-id></citation>
</ref>
<ref id="B44">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nolan</surname> <given-names>H.</given-names></name> <name><surname>Whelan</surname> <given-names>R.</given-names></name> <name><surname>Reilly</surname> <given-names>R. B.</given-names></name></person-group> (<year>2010</year>). <article-title>Faster: fully automated statistical thresholding for EEG artifact rejection</article-title>. <source>J. Neurosci. Methods</source> <volume>192</volume>, <fpage>152</fpage>&#x02013;<lpage>162</lpage>. <pub-id pub-id-type="doi">10.1016/j.jneumeth.2010.07.015</pub-id><pub-id pub-id-type="pmid">20654646</pub-id></citation></ref>
<ref id="B45">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Oliveira</surname> <given-names>F. T.</given-names></name> <name><surname>McDonald</surname> <given-names>J. J.</given-names></name> <name><surname>Goodman</surname> <given-names>D.</given-names></name></person-group> (<year>2007</year>). <article-title>Performance monitoring in the anterior cingulate is not all error related: expectancy deviation and the representation of action-outcome associations</article-title>. <source>J. Cogn. Neurosci</source>. <volume>19</volume>, <fpage>1994</fpage>&#x02013;<lpage>2004</lpage>. <pub-id pub-id-type="doi">10.1162/jocn.2007.19.12.1994</pub-id><pub-id pub-id-type="pmid">17892382</pub-id></citation></ref>
<ref id="B46">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Osa</surname> <given-names>T.</given-names></name> <name><surname>Sugita</surname> <given-names>N.</given-names></name> <name><surname>Mitsuishi</surname> <given-names>M.</given-names></name></person-group> (<year>2014</year>). <article-title>&#x0201C;Online trajectory planning in dynamic environments for surgical task automation,&#x0201D;</article-title> in <source>Robotics: Science and Systems X</source>, eds. D. Fox, L. E. Kavraki, and H. Kurniawati (<publisher-loc>Berkeley, CA</publisher-loc>: <publisher-name>University of California</publisher-name>), <fpage>1</fpage>&#x02013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.15607/RSS.2014.X.011</pub-id></citation>
</ref>
<ref id="B47">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Padrao</surname> <given-names>G.</given-names></name> <name><surname>Gonzalez-Franco</surname> <given-names>M.</given-names></name> <name><surname>Sanchez-Vives</surname> <given-names>M. V.</given-names></name> <name><surname>Slater</surname> <given-names>M.</given-names></name> <name><surname>Rodriguez-Fornells</surname> <given-names>A.</given-names></name></person-group> (<year>2016</year>). <article-title>Violating body movement semantics: neural signatures of self-generated and external-generated errors</article-title>. <source>Neuroimage</source> <volume>124</volume>, <fpage>147</fpage>&#x02013;<lpage>156</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2015.08.022</pub-id><pub-id pub-id-type="pmid">26282856</pub-id></citation></ref>
<ref id="B48">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pailing</surname> <given-names>P. E.</given-names></name> <name><surname>Segalowitz</surname> <given-names>S. J.</given-names></name></person-group> (<year>2004</year>). <article-title>The effects of uncertainty in error monitoring on associated ERPs</article-title>. <source>Brain Cogn</source>. <volume>56</volume>, <fpage>215</fpage>&#x02013;<lpage>233</lpage>. <pub-id pub-id-type="doi">10.1016/j.bandc.2004.06.005</pub-id><pub-id pub-id-type="pmid">15518937</pub-id></citation></ref>
<ref id="B49">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pandya</surname> <given-names>A.</given-names></name> <name><surname>Reisner</surname> <given-names>L. A.</given-names></name> <name><surname>King</surname> <given-names>B.</given-names></name> <name><surname>Lucas</surname> <given-names>N.</given-names></name> <name><surname>Composto</surname> <given-names>A.</given-names></name> <name><surname>Klein</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>A review of camera viewpoint automation in robotic and laparoscopic surgery</article-title>. <source>Robotics</source> <volume>3</volume>, <fpage>310</fpage>&#x02013;<lpage>329</lpage>. <pub-id pub-id-type="doi">10.3390/robotics3030310</pub-id></citation>
</ref>
<ref id="B50">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pavone</surname> <given-names>E. F.</given-names></name> <name><surname>Tieri</surname> <given-names>G.</given-names></name> <name><surname>Rizza</surname> <given-names>G.</given-names></name> <name><surname>Tidoni</surname> <given-names>E.</given-names></name> <name><surname>Grisoni</surname> <given-names>L.</given-names></name> <name><surname>Aglioti</surname> <given-names>S. M.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Embodying others in immersive virtual reality: electro-cortical signatures of monitoring the errors in the actions of an avatar seen from a first-person perspective</article-title>. <source>J. Neurosci</source>. <volume>36</volume>, <fpage>268</fpage>&#x02013;<lpage>279</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.0494-15.2016</pub-id><pub-id pub-id-type="pmid">26758821</pub-id></citation></ref>
<ref id="B51">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Penaloza</surname> <given-names>C. I.</given-names></name> <name><surname>Mae</surname> <given-names>Y.</given-names></name> <name><surname>Kojima</surname> <given-names>M.</given-names></name> <name><surname>Arai</surname> <given-names>T.</given-names></name></person-group> (<year>2015</year>). <article-title>Brain signal-based safety measure activation for robotic systems</article-title>. <source>Adv. Robot</source>. <volume>29</volume>, <fpage>1234</fpage>&#x02013;<lpage>1242</lpage>. <pub-id pub-id-type="doi">10.1080/01691864.2015.1057615</pub-id></citation>
</ref>
<ref id="B52">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pernet</surname> <given-names>C.</given-names></name> <name><surname>Latinus</surname> <given-names>M.</given-names></name> <name><surname>Nichols</surname> <given-names>T.</given-names></name> <name><surname>Rousselet</surname> <given-names>G.</given-names></name></person-group> (<year>2015</year>). <article-title>Cluster-based computational methods for mass univariate analyses of event-related brain potentials/fields: a simulation study</article-title>. <source>J. Neurosci. Methods</source> <volume>250</volume>, <fpage>85</fpage>&#x02013;<lpage>93</lpage>. <pub-id pub-id-type="doi">10.1016/j.jneumeth.2014.08.003</pub-id><pub-id pub-id-type="pmid">25128255</pub-id></citation></ref>
<ref id="B53">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Perrin</surname> <given-names>F.</given-names></name> <name><surname>Pernier</surname> <given-names>J.</given-names></name> <name><surname>Bertrand</surname> <given-names>O.</given-names></name> <name><surname>Echallier</surname> <given-names>J. F.</given-names></name></person-group> (<year>1989</year>). <article-title>Spherical splines for scalp potential and current density mapping</article-title>. <source>Electroencephalogr. Clin. Neurophysiol</source>. <volume>72</volume>, <fpage>184</fpage>&#x02013;<lpage>187</lpage>. <pub-id pub-id-type="doi">10.1016/0013-4694(89)90180-6</pub-id><pub-id pub-id-type="pmid">2464490</pub-id></citation></ref>
<ref id="B54">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Polich</surname> <given-names>J.</given-names></name></person-group> (<year>2007</year>). <article-title>Updating P300: an integrative theory of P3a and P3b</article-title>. <source>Clin. Neurophysiol</source>. <volume>118</volume>, <fpage>2128</fpage>&#x02013;<lpage>2148</lpage>. <pub-id pub-id-type="doi">10.1016/j.clinph.2007.04.019</pub-id><pub-id pub-id-type="pmid">17573239</pub-id></citation></ref>
<ref id="B55">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Protzak</surname> <given-names>J.</given-names></name> <name><surname>Ihme</surname> <given-names>K.</given-names></name> <name><surname>Zander</surname> <given-names>T. O.</given-names></name></person-group> (<year>2013</year>). <article-title>&#x0201C;A passive brain-computer interface for supporting gaze-based human-machine interaction,&#x0201D;</article-title> in <source>Universal Access in Human-Computer Interaction. Design Methods, Tools, and Interaction Techniques for eInclusion: 7th International Conference, UAHCI 2013, Held as Part of HCI International 2013, Las Vegas, NV, USA, July 21-26, 2013, Proceedings, Part I 7</source> (<publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>662</fpage>&#x02013;<lpage>671</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-642-39188-0_71</pub-id></citation>
</ref>
<ref id="B56">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Richter</surname> <given-names>F.</given-names></name> <name><surname>Orosco</surname> <given-names>R. K.</given-names></name> <name><surname>Yip</surname> <given-names>M. C.</given-names></name></person-group> (<year>2019</year>). <article-title>Open-sourced reinforcement learning environments for surgical robotics</article-title>. <source>arXiv</source> [Preprint]. arXiv:1903.02090. <pub-id pub-id-type="doi">10.48550/arXiv.1903.02090</pub-id></citation>
</ref>
<ref id="B57">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ridderinkhof</surname> <given-names>K. R.</given-names></name> <name><surname>Ramautar</surname> <given-names>J. R.</given-names></name> <name><surname>Wijnen</surname> <given-names>J. G.</given-names></name></person-group> (<year>2009</year>). <article-title>To Pe or not to Pe: a P3-like ERP component reflecting the processing of response errors</article-title>. <source>Psychophysiology</source> <volume>46</volume>, <fpage>531</fpage>&#x02013;<lpage>538</lpage>. <pub-id pub-id-type="doi">10.1111/j.1469-8986.2009.00790.x</pub-id><pub-id pub-id-type="pmid">19226310</pub-id></citation></ref>
<ref id="B58">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rivet</surname> <given-names>B.</given-names></name> <name><surname>Souloumiac</surname> <given-names>A.</given-names></name> <name><surname>Attina</surname> <given-names>V.</given-names></name> <name><surname>Gibert</surname> <given-names>G.</given-names></name></person-group> (<year>2009</year>). <article-title>Xdawn algorithm to enhance evoked potentials: application to brain-computer interface</article-title>. <source>IEEE Trans. Biomed. Eng</source>. <volume>56</volume>, <fpage>2035</fpage>&#x02013;<lpage>2043</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2009.2012869</pub-id><pub-id pub-id-type="pmid">19174332</pub-id></citation></ref>
<ref id="B59">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Salazar-Gomez</surname> <given-names>A. F.</given-names></name> <name><surname>DelPreto</surname> <given-names>J.</given-names></name> <name><surname>Gil</surname> <given-names>S.</given-names></name> <name><surname>Guenther</surname> <given-names>F. H.</given-names></name> <name><surname>Rus</surname> <given-names>D.</given-names></name></person-group> (<year>2017</year>). <article-title>&#x0201C;Correcting robot mistakes in real time using EEG signals,&#x0201D;</article-title> in <source>2017 IEEE international conference on robotics and automation (ICRA)</source> (<publisher-loc>Singapore</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>6570</fpage>&#x02013;<lpage>6577</lpage>. <pub-id pub-id-type="doi">10.1109/ICRA.2017.7989777</pub-id><pub-id pub-id-type="pmid">39086374</pub-id></citation></ref>
<ref id="B60">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sassenhagen</surname> <given-names>J.</given-names></name> <name><surname>Schlesewsky</surname> <given-names>M.</given-names></name> <name><surname>Bornkessel-Schlesewsky</surname> <given-names>I.</given-names></name></person-group> (<year>2014</year>). <article-title>The p600-as-p3 hypothesis revisited: Single-trial analyses reveal that the late EEG positivity following linguistically deviant material is reaction time aligned</article-title>. <source>Brain Lang</source>. <volume>137</volume>, <fpage>29</fpage>&#x02013;<lpage>39</lpage>. <pub-id pub-id-type="doi">10.1016/j.bandl.2014.07.010</pub-id><pub-id pub-id-type="pmid">25151545</pub-id></citation></ref>
<ref id="B61">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Scheffers</surname> <given-names>M. K.</given-names></name> <name><surname>Coles</surname> <given-names>M. G.</given-names></name></person-group> (<year>2000</year>). <article-title>Performance monitoring in a confusing world: error-related brain activity, judgments of response accuracy, and types of errors</article-title>. <source>J. Exp. Psychol</source>. <volume>26</volume>:<fpage>141</fpage>. <pub-id pub-id-type="doi">10.1037//0096-1523.26.1.141</pub-id><pub-id pub-id-type="pmid">10696610</pub-id></citation></ref>
<ref id="B62">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Scheikl</surname> <given-names>P. M.</given-names></name> <name><surname>Schreiber</surname> <given-names>N.</given-names></name> <name><surname>Haas</surname> <given-names>C.</given-names></name> <name><surname>Freymuth</surname> <given-names>N.</given-names></name> <name><surname>Neumann</surname> <given-names>G.</given-names></name> <name><surname>Lioutikov</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Movement primitive diffusion: learning gentle robotic manipulation of deformable objects</article-title>. <source>IEEE Robot. Autom. Lett</source>. <volume>9</volume>, <fpage>5338</fpage>&#x02013;<lpage>5345</lpage>. <pub-id pub-id-type="doi">10.1109/LRA.2024.3382529</pub-id></citation>
</ref>
<ref id="B63">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Simola</surname> <given-names>J.</given-names></name> <name><surname>Le Fevre</surname> <given-names>K.</given-names></name> <name><surname>Torniainen</surname> <given-names>J.</given-names></name> <name><surname>Baccino</surname> <given-names>T.</given-names></name></person-group> (<year>2015</year>). <article-title>Affective processing in natural scene viewing: valence and arousal interactions in eye-fixation-related potentials</article-title>. <source>Neuroimage</source> <volume>106</volume>, <fpage>21</fpage>&#x02013;<lpage>33</lpage>. <pub-id pub-id-type="doi">10.1016/j.neuroimage.2014.11.030</pub-id><pub-id pub-id-type="pmid">25463473</pub-id></citation></ref>
<ref id="B64">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Somon</surname> <given-names>B.</given-names></name> <name><surname>Campagne</surname> <given-names>A.</given-names></name> <name><surname>Delorme</surname> <given-names>A.</given-names></name> <name><surname>Berberian</surname> <given-names>B.</given-names></name></person-group> (<year>2017</year>). <article-title>Performance monitoring applied to system supervision</article-title>. <source>Front. Hum. Neurosci</source>. <volume>11</volume>:<fpage>360</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2017.00360</pub-id><pub-id pub-id-type="pmid">28744209</pub-id></citation></ref>
<ref id="B65">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Somon</surname> <given-names>B.</given-names></name> <name><surname>Campagne</surname> <given-names>A.</given-names></name> <name><surname>Delorme</surname> <given-names>A.</given-names></name> <name><surname>Berberian</surname> <given-names>B.</given-names></name></person-group> (<year>2019</year>). <article-title>Evaluation of performance monitoring ERPs through difficulty manipulation in a response-feedback paradigm</article-title>. <source>Brain Res</source>. <volume>1704</volume>, <fpage>196</fpage>&#x02013;<lpage>206</lpage>. <pub-id pub-id-type="doi">10.1016/j.brainres.2018.10.007</pub-id><pub-id pub-id-type="pmid">30300637</pub-id></citation></ref>
<ref id="B66">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sp&#x000FC;ler</surname> <given-names>M.</given-names></name> <name><surname>Niethammer</surname> <given-names>C.</given-names></name></person-group> (<year>2015</year>). <article-title>Error-related potentials during continuous feedback: using EEG to detect errors of different type and severity</article-title>. <source>Front. Hum. Neurosci</source>. <volume>9</volume>:<fpage>155</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2015.00155</pub-id><pub-id pub-id-type="pmid">25859204</pub-id></citation></ref>
<ref id="B67">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sutton</surname> <given-names>S.</given-names></name> <name><surname>Braren</surname> <given-names>M.</given-names></name> <name><surname>Zubin</surname> <given-names>J.</given-names></name> <name><surname>John</surname> <given-names>E.</given-names></name></person-group> (<year>1965</year>). <article-title>Evoked-potential correlates of stimulus uncertainty</article-title>. <source>Science</source> <volume>150</volume>, <fpage>1187</fpage>&#x02013;<lpage>1188</lpage>. <pub-id pub-id-type="doi">10.1126/science.150.3700.1187</pub-id><pub-id pub-id-type="pmid">5852977</pub-id></citation></ref>
<ref id="B68">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Thananjeyan</surname> <given-names>B.</given-names></name> <name><surname>Garg</surname> <given-names>A.</given-names></name> <name><surname>Krishnan</surname> <given-names>S.</given-names></name> <name><surname>Chen</surname> <given-names>C.</given-names></name> <name><surname>Miller</surname> <given-names>L.</given-names></name> <name><surname>Goldberg</surname> <given-names>K.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>&#x0201C;Multilateral surgical pattern cutting in 2d orthotropic gauze with deep reinforcement learning policies for tensioning,&#x0201D;</article-title> in <source>2017 IEEE International Conference on Robotics and Automation (ICRA)</source> (<publisher-loc>Singapore</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>2371</fpage>&#x02013;<lpage>2378</lpage>. <pub-id pub-id-type="doi">10.1109/ICRA.2017.7989275</pub-id></citation>
</ref>
<ref id="B69">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ullsperger</surname> <given-names>M.</given-names></name> <name><surname>Fischer</surname> <given-names>A. G.</given-names></name> <name><surname>Nigbur</surname> <given-names>R.</given-names></name> <name><surname>Endrass</surname> <given-names>T.</given-names></name></person-group> (<year>2014</year>). <article-title>Neural mechanisms and temporal dynamics of performance monitoring</article-title>. <source>Trends Cogn. Sci</source>. <volume>18</volume>, <fpage>259</fpage>&#x02013;<lpage>267</lpage>. <pub-id pub-id-type="doi">10.1016/j.tics.2014.02.009</pub-id><pub-id pub-id-type="pmid">24656460</pub-id></citation></ref>
<ref id="B70">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Van Den Berg</surname> <given-names>J.</given-names></name> <name><surname>Miller</surname> <given-names>S.</given-names></name> <name><surname>Duckworth</surname> <given-names>D.</given-names></name> <name><surname>Hu</surname> <given-names>H.</given-names></name> <name><surname>Wan</surname> <given-names>A.</given-names></name> <name><surname>Fu</surname> <given-names>X.-Y.</given-names></name> <etal/></person-group>. (<year>2010</year>). <article-title>&#x0201C;Superhuman performance of surgical tasks by robots using iterative learning from human-guided demonstrations,&#x0201D;</article-title> in <source>2010 IEEE International Conference on Robotics and Automation</source> (<publisher-loc>Anchorage, AK</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>2074</fpage>&#x02013;<lpage>2081</lpage>. <pub-id pub-id-type="doi">10.1109/ROBOT.2010.5509621</pub-id></citation>
</ref>
<ref id="B71">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>van Schie</surname> <given-names>H. T.</given-names></name> <name><surname>Mars</surname> <given-names>R. B.</given-names></name> <name><surname>Coles</surname> <given-names>M. G.</given-names></name> <name><surname>Bekkering</surname> <given-names>H.</given-names></name></person-group> (<year>2004</year>). <article-title>Modulation of activity in medial frontal and motor cortices during error observation</article-title>. <source>Nat. Neurosci</source>. <volume>7</volume>, <fpage>549</fpage>&#x02013;<lpage>554</lpage>. <pub-id pub-id-type="doi">10.1038/nn1239</pub-id><pub-id pub-id-type="pmid">15107858</pub-id></citation></ref>
<ref id="B72">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Vukeli&#x00107;</surname> <given-names>M.</given-names></name> <name><surname>Bui</surname> <given-names>M.</given-names></name> <name><surname>Vorreuther</surname> <given-names>A.</given-names></name> <name><surname>Lingelbach</surname> <given-names>K.</given-names></name></person-group> (<year>2023</year>). <article-title>Combining brain-computer interfaces with deep reinforcement learning for robot training: a feasibility study in a simulation environment</article-title>. <source>Front. Neuroergonom</source>. <volume>4</volume>:<fpage>1274730</fpage>. <pub-id pub-id-type="doi">10.3389/fnrgo.2023.1274730</pub-id><pub-id pub-id-type="pmid">38234482</pub-id></citation></ref>
<ref id="B73">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zander</surname> <given-names>T. O.</given-names></name> <name><surname>Shetty</surname> <given-names>K.</given-names></name> <name><surname>Lorenz</surname> <given-names>R.</given-names></name> <name><surname>Leff</surname> <given-names>D. R.</given-names></name> <name><surname>Krol</surname> <given-names>L. R.</given-names></name> <name><surname>Darzi</surname> <given-names>A. W.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Automated task load detection with electroencephalography: towards passive brain-computer interfacing in robotic surgery</article-title>. <source>J. Med. Robot. Res</source>. <volume>2</volume>:<fpage>1750003</fpage>. <pub-id pub-id-type="doi">10.1142/S2424905X17500039</pub-id></citation>
</ref>
</ref-list>
</back>
</article>