﻿<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" article-type="systematic-review">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neuroergonomics</journal-id>
<journal-title-group>
<journal-title>Frontiers in Neuroergonomics</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neuroergonomics</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2673-6195</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnrgo.2026.1756956</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Systematic Review</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Reviewing digital collaborative interactions with multimodal hyperscanning through an ever-growing database</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Vorreuther</surname> <given-names>Anna</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<uri xlink:href="https://loop.frontiersin.org/people/2573606"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Brouwer</surname> <given-names>Anne-Marie</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<uri xlink:href="https://loop.frontiersin.org/people/11316"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Vukeli&#x00107;</surname> <given-names>Mathias</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/171128"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Applied Neurocognitive Systems, Institute of Human Factors and Technology Management IAT, University of Stuttgart</institution>, <city>Stuttgart</city>, <country country="de">Germany</country></aff>
<aff id="aff2"><label>2</label><institution>Artificial Intelligence, Donders Centre for Brain, Cognition and Behavior, Radboud University</institution>, <city>Nijmegen</city>, <country>Netherlands</country></aff>
<aff id="aff3"><label>3</label><institution>Human Performance, Netherlands Organization for Applied Scientific Research (TNO)</institution>, <city>Soesterberg</city>, <country>Netherlands</country></aff>
<aff id="aff4"><label>4</label><institution>Applied Neurocognitive Systems, Fraunhofer-Institute for Industrial Engineering IAO</institution>, <city>Stuttgart</city>, <country country="de">Germany</country></aff>
<author-notes>
<corresp id="c001"><label>&#x0002A;</label>Correspondence: Anna Vorreuther, <email xlink:href="mailto:anna.vorreuther@iat.uni-stuttgart.de">anna.vorreuther@iat.uni-stuttgart.de</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-10">
<day>10</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>7</volume>
<elocation-id>1756956</elocation-id>
<history>
<date date-type="received">
<day>29</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>09</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>21</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2026 Vorreuther, Brouwer and Vukeli&#x00107;.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Vorreuther, Brouwer and Vukeli&#x00107;</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-10">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Digital technologies now mediate a substantial proportion of human collaboration, reshaping how individuals coordinate attention, share information, and jointly act on goals. These digitally mediated interactions engage neural, physiological, and behavioral processes differently than face-to-face settings do. Mobile hyperscanning, i.e., the simultaneous (neuro-)physiological measurement of two or more individuals, offers a unique window into these multidimensional dynamics. Yet, the existing literature is highly fragmented in design, modality, and analytic rigor, making it difficult to accumulate knowledge. This review systematically synthesizes hyperscanning research investigating collaboration involving digital components and identifies key methodological and conceptual gaps that must be addressed to advance the field.</p></sec>
<sec>
<title>Methods</title>
<p>We searched Scopus, PubMed, and Web of Science (April 2025) for mobile hyperscanning studies on digital collaboration. Forty-five eligible studies involving simultaneous measurements of at least two healthy adults engaged in collaborative tasks with a digital interaction component were included. Studies were categorized across 13 dimensions, including modality, task design, interaction type, analysis method, and cognitive domain. To ensure transparency and support cumulative synthesis, we created a continuously updated online resource (&#x0201C;InterBrainDB&#x0201D;).</p></sec>
<sec>
<title>Results</title>
<p>Most studies relied on unimodal neuroimaging, predominantly electroencephalography (EEG) or functional near-infrared spectroscopy (fNIRS), with only seven studies implementing multimodal combinations. Study designs favored cooperative tasks or naturalistic scenarios with symmetrical roles, typically using same-sex dyads of unfamiliar individuals. Non-verbal interaction was studied slightly more often than verbal. Analytically, functional connectivity dominated, whereas effective connectivity, multimodal fusion, and machine learning were scarcely used. Executive and social cognition were more frequently investigated than creativity, memory, and language.</p></sec>
<sec>
<title>Discussion</title>
<p>Research on digital collaboration through hyperscanning is growing, yet progress is limited by methodological heterogeneity, narrow use of modalities, and analytical conservatism. Future advances will require: (1) multimodal integration to fully capture neural, physiological, and behavioral dynamics; (2) systematic comparisons across varying degrees of digitalization to understand how technology shapes interaction; (3) physiology-informed analysis frameworks capable of modeling high-dimensional interpersonal dynamics; and (4) clearer reporting standards to enable reproducibility and large-scale synthesis. Resources like our InterBrainDB can structure community-driven progress toward ecologically grounded models of digitally mediated collaboration, a domain of increasing scientific and societal relevance.</p></sec></abstract>
<kwd-group>
<kwd>collaboration</kwd>
<kwd>database</kwd>
<kwd>digitalization</kwd>
<kwd>electroencephalography</kwd>
<kwd>eye-tracking</kwd>
<kwd>functional near-infrared spectroscopy</kwd>
<kwd>hyperscanning</kwd>
<kwd>review</kwd>
</kwd-group>
<funding-group>
  <funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by the Fraunhofer Internal Programs project &#x0201C;INSTANCE II &#x02013; Immersive Industrial Innovation Ecosystems.&#x0201D;</funding-statement>
</funding-group>
<counts>
<fig-count count="9"/>
<table-count count="7"/>
<equation-count count="0"/>
<ref-count count="108"/>
<page-count count="24"/>
<word-count count="16020"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Cognitive Neuroergonomics</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<sec>
<label>1.1</label>
<title>Measuring collaboration in the digital age</title>
<p>Collaboration, i.e., an activity where two or more individuals share a goal or intention that motivates joint work (<xref ref-type="bibr" rid="B51">L&#x000E9;n&#x000E9; et al., 2021</xref>; <xref ref-type="bibr" rid="B96">Wood and Gray, 1991</xref>), is one of the most prevalent topics under study in hyperscanning research. Much of the field has examined face-to-face social interaction under naturalistic conditions (<xref ref-type="bibr" rid="B20">Czeszumski et al., 2020</xref>; <xref ref-type="bibr" rid="B29">Fan et al., 2021</xref>; <xref ref-type="bibr" rid="B76">R&#x000E9;veill&#x000E9; et al., 2024</xref>; <xref ref-type="bibr" rid="B79">Schneider et al., 2021</xref>). Yet, collaboration in contemporary society increasingly occurs through digital means, including screen-based meetings, shared online workspaces, and immersive virtual environments (<xref ref-type="bibr" rid="B23">Desch&#x000EA;nes, 2024</xref>; <xref ref-type="bibr" rid="B98">Wu et al., 2023</xref>; <xref ref-type="bibr" rid="B100">Yang et al., 2025</xref>). These developments raise critical questions about how environmental conditions (virtual vs. in-person) and task-specific tools or modalities (e.g., digital vs. analog objects, verbal vs. text-based communication) influence the brain, body, and behavioral signatures of collaboration (<xref ref-type="bibr" rid="B50">Leahy et al., 2025</xref>; <xref ref-type="bibr" rid="B60">Magni et al., 2025</xref>; <xref ref-type="bibr" rid="B83">Solomon and Theiss, 2022</xref>). Digital media introduces a virtual divide (e.g., a monitor), altering attentional demands (<xref ref-type="bibr" rid="B15">Chuang and Hsu, 2023</xref>; <xref ref-type="bibr" rid="B82">Snijdewint and Scheepers, 2023</xref>), perception of implicit social cues (<xref ref-type="bibr" rid="B36">Frith and Frith, 2008</xref>; <xref ref-type="bibr" rid="B67">Oh et al., 2018</xref>; <xref ref-type="bibr" rid="B80">Sharan et al., 2022</xref>), and cognitive workload (<xref ref-type="bibr" rid="B57">Luebstorf et al., 2023</xref>; <xref ref-type="bibr" rid="B66">Nurmi and Pakarinen, 2023</xref>). As a result, findings from face-to-face hyperscanning studies cannot be assumed to generalize to digitally mediated settings, where collaborators rely on virtual rather than physical co-presence (<xref ref-type="bibr" rid="B5">Balconi et al., 2022</xref>; <xref ref-type="bibr" rid="B8">Balters et al., 2023</xref>; <xref ref-type="bibr" rid="B54">Liu et al., 2019</xref>; <xref ref-type="bibr" rid="B77">Sarasso et al., 2022</xref>, <xref ref-type="bibr" rid="B78">2024</xref>). Although prior work suggests that the degree of digitality modulates neural and behavioral responses (for reviews, see <xref ref-type="bibr" rid="B6">Balters et al., 2020</xref>; <xref ref-type="bibr" rid="B9">Barde et al., 2020</xref>), the literature remains fragmented by different choices in task, measurement, and analysis. These developments motivate a systematic review of how hyperscanning is currently used to investigate digital collaboration.</p>
</sec>
<sec>
<label>1.2</label>
<title>From hyperscanning origins to multimodal mobile neuroimaging</title>
<p>The term hyperscanning was introduced by <xref ref-type="bibr" rid="B63">Montague et al. (2002)</xref> to describe simultaneous functional magnetic resonance imaging (fMRI) of interacting individuals playing a competitive deception task. Earlier work had already explored dual-brain recordings and reported synchronized electroencephalography (EEG) activity in the alpha band (8&#x02013;13 Hz) in identical twins (<xref ref-type="bibr" rid="B28">Duane and Behrendt, 1965</xref>). Since then, hyperscanning has expanded rapidly, driven by advances in mobile neuroimaging (for a review on the historical development see <xref ref-type="bibr" rid="B64">Nam et al., 2020</xref>). Today, EEG and functional near-infrared spectroscopy (fNIRS) are the two most widely used mobile brain-imaging modalities, offering complementary strengths: EEG provides high temporal resolution on a millisecond timescale, whereas fNIRS affords greater spatial specificity for cortical surface activity, making it well-suited for mapping spatial patterns of brain activation (for review see <xref ref-type="bibr" rid="B61">Mehta and Parasuraman, 2013</xref>). Recent advances in mobile neuroimaging and physiological sensing, together with increasingly accessible wearable sensors, have expanded hyperscanning beyond stationary setups such as fMRI or magnetoencephalography (MEG), enabling the study of interpersonal dynamics in ever more diverse and applied settings (<xref ref-type="bibr" rid="B12">Carollo and Esposito, 2024</xref>).</p>
<p>Real-world and digitally mediated interactions unfold across multiple sensory channels and involve tightly coupled cognitive, affective, and behavioral processes (<xref ref-type="bibr" rid="B101">Zamm et al., 2024</xref>). Accordingly, reviews increasingly emphasize the value of multimodal and ecologically grounded frameworks of social interaction (<xref ref-type="bibr" rid="B41">Hakim et al., 2023</xref>; <xref ref-type="bibr" rid="B79">Schneider et al., 2021</xref>). Empirical work has begun to integrate brain-based methods with bodily measures such as gaze alignment (<xref ref-type="bibr" rid="B15">Chuang and Hsu, 2023</xref>), electrocardiography (ECG), electrodermal activity (EDA; <xref ref-type="bibr" rid="B65">Numata et al., 2021</xref>), communication signals (<xref ref-type="bibr" rid="B56">Lu et al., 2020</xref>), and other camera-based physiological metrics (<xref ref-type="bibr" rid="B81">Shih et al., 2024</xref>). This integration, called embodied hyperscanning (for review see Grasso-Cladera et al., <xref ref-type="bibr" rid="B38">2024</xref>), has facilitated the transition from controlled laboratory paradigms to applied settings such as classrooms (<xref ref-type="bibr" rid="B25">Dikker et al., 2017</xref>; <xref ref-type="bibr" rid="B105">Zhang et al., 2024</xref>), workplaces (<xref ref-type="bibr" rid="B93">Wikstr&#x000F6;m et al., 2021</xref>; <xref ref-type="bibr" rid="B98">Wu et al., 2023</xref>), and other real-world environments (<xref ref-type="bibr" rid="B8">Balters et al., 2023</xref>). In line with this evolution, we use the term &#x0201C;hyperscanning&#x0201D; here to encompass approaches that incorporate at least one brain- or body-based physiological measurement modality.</p>
</sec>
<sec>
<label>1.3</label>
<title>Established and emerging analytic approaches</title>
<p>Irrespective of the signal measured, inter-brain synchrony (IBS) has been the most widely reported analysis approach for interpersonal dynamics in a variety of interactive scenarios (for review see <xref ref-type="bibr" rid="B76">R&#x000E9;veill&#x000E9; et al., 2024</xref>). IBS has been linked to various processes central to social interaction, including shared attention (<xref ref-type="bibr" rid="B25">Dikker et al., 2017</xref>; <xref ref-type="bibr" rid="B82">Snijdewint and Scheepers, 2023</xref>), simultaneous movements (<xref ref-type="bibr" rid="B40">Gumilar et al., 2021</xref>), imitation (<xref ref-type="bibr" rid="B22">Delaherche et al., 2015</xref>; <xref ref-type="bibr" rid="B47">Konvalinka et al., 2023</xref>), social closeness between partners (<xref ref-type="bibr" rid="B75">Reinero et al., 2021</xref>), and the nature of their engagement (e.g., cooperation vs. competition; <xref ref-type="bibr" rid="B43">Hayne et al., 2023</xref>). Given the pronounced sensitivity of IBS to contextual and situational factors, newly emerging digital collaboration formats warrant systematic investigation as they increasingly reflect naturalistic settings (<xref ref-type="bibr" rid="B29">Fan et al., 2021</xref>). Alongside synchrony-based metrics at the inter-brain level, multivariate modeling and machine learning approaches have emerged in intra-brain research; these could expand the analytical repertoire of hyperscanning, enabling improved decoding of interpersonal states and complementary use of EEG, fNIRS, and physiological signals (for reviews see <xref ref-type="bibr" rid="B26">Dissanayake et al., 2025</xref>; <xref ref-type="bibr" rid="B58">L&#x000FC;hmann and M&#x000FC;ller, 2017</xref>; <xref ref-type="bibr" rid="B73">Pinto-Orellana et al., 2024</xref>).</p>
</sec>
<sec>
<label>1.4</label>
<title>Challenges and structuring principles in the literature</title>
<p>Despite these methodological and analytic advances, synthesizing hyperscanning studies investigating digital social interaction remains challenging due to substantial heterogeneity. Heterogeneity is introduced by varying group size (dyads, triads, or larger collectives; <xref ref-type="bibr" rid="B45">Hou et al., 2022</xref>; <xref ref-type="bibr" rid="B71">Park et al., 2023</xref>), pre-existing social relationships between partners (<xref ref-type="bibr" rid="B2">Bae et al., 2024</xref>; <xref ref-type="bibr" rid="B24">Dikker et al., 2021</xref>), and the gender composition of paired participants (<xref ref-type="bibr" rid="B106">Zhang et al., 2023c</xref>). Furthermore, the degree of physical vs. virtual co-presence differs across paradigms with respect to how participants are situated (same or different rooms, physical or virtual co-presence), how information is exchanged, and to what extent interactions rely on digital vs. analog means (<xref ref-type="bibr" rid="B6">Balters et al., 2020</xref>). For instance, some experiments manipulate remoteness by prohibiting direct visual and/or auditory contact through a physical divide (e.g., <xref ref-type="bibr" rid="B98">Wu et al., 2023</xref>) or by placing participants back-to-back (e.g., <xref ref-type="bibr" rid="B48">K&#x000FC;tt et al., 2019</xref>).</p>
<p>Previous reviews have proposed frameworks to organize this diversity. For digitally mediated interaction, several categories have been identified by <xref ref-type="bibr" rid="B7">Balters et al. (2021)</xref>: (i) the type of communication (goal-directed vs. open-ended), (ii) the transfer of information (i.e., whether information exchange between participants happened via analog, digital, or mixed channels), (iii) the interaction medium (originally referred to as &#x0201C;interaction manipulative&#x0201D;) being digital or non-digital, and (iv) the interaction scenario, i.e., the spatial layout of how participants are situated relative to each other (e.g., face-to-face, side-by-side, virtually connected). Other work identified the experimental task itself as a central factor for recurring experimental archetypes (<xref ref-type="bibr" rid="B9">Barde et al., 2020</xref>; <xref ref-type="bibr" rid="B90">Wang et al., 2018</xref>). Moreover, a clear conceptual distinction can be made between collaborative interactions based on task symmetry, distinguishing between symmetrical interactions without predefined roles and asymmetrical tasks with designated roles (<xref ref-type="bibr" rid="B92">Wikstr&#x000F6;m, 2022</xref>). Together, these dimensions outline the core design space of hyperscanning studies, although they are currently not integrated across modalities or applied specifically to digitally mediated collaboration.</p>
</sec>
<sec>
<label>1.5</label>
<title>Objective of the present review</title>
<p>Without structuring the body of research along all of these dimensions, theoretical synthesis and cross-study comparison remain difficult, particularly when assessing how digitalization shapes the neural, physiological, and behavioral foundations of collaboration. The current review organizes hyperscanning research along dimensions relevant to studying remote and digitally mediated collaboration. Building on this structured synthesis, we present the framework for a living literature database: an extensible, interactive platform that categorizes hyperscanning studies according to measurement modalities, analytic approaches, interaction design dimensions, and task characteristics. The current version includes studies of remote collaborative interactions using mobile hyperscanning methods, with the long-term goal of expanding the database to incorporate additional modalities, paradigms, and newly published work.</p></sec>
</sec>
<sec id="s2">
<label>2</label>
<title>Methods</title>
<p>This review followed the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) guidelines (<xref ref-type="bibr" rid="B53">Liberati et al., 2009</xref>; <xref ref-type="bibr" rid="B69">Page et al., 2021</xref>). A summary of the retrieval, screening, and inclusion steps can be found in <xref ref-type="fig" rid="F1">Figure 1</xref>.</p>
<fig position="float" id="F1">
<label>Figure 1</label>
<caption><p>Overview of source retrieval. Flowchart of source retrieval process according to PRISMA guidelines.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnrgo-07-1756956-g0001.tif">
</graphic>
</fig>
<sec>
<label>2.1</label>
<title>Identification</title>
<p>To identify eligible sources for review, a broadly defined search for hyperscanning studies, including or focusing on digital interaction scenarios, was conducted using a combination of search terms that reflect hyperscanning, collaboration, recording modalities, and digital context (see <xref ref-type="table" rid="T1">Table 1</xref>). Initially, Scopus (<ext-link ext-link-type="uri" xlink:href="https://www.scopus.com">https://www.scopus.com</ext-link>), PubMed (<ext-link ext-link-type="uri" xlink:href="https://pubmed.ncbi.nlm.nih.gov">https://pubmed.ncbi.nlm.nih.gov</ext-link>), and Web of Science (<ext-link ext-link-type="uri" xlink:href="https://www.webofscience.com">https://www.webofscience.com</ext-link>) were searched. This search was conducted on April 28, 2025. The exact search terms were adapted according to the specifications of each search engine (for details, see <xref ref-type="supplementary-material" rid="SM1">Supplementary Table 1</xref>). The initial search was carried out for journal articles published in English or German, searching paper titles and abstracts only. In line with the PRISMA guidelines, additional (unsystematic) data searches were conducted as well (see <xref ref-type="fig" rid="F1">Figure 1</xref>). Any additional findings brought to us via search alerts or by colleagues were included if eligible, although no additional proactive search was conducted.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Concepts contained in search string components.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Concept</bold></th>
<th valign="top" align="left"><bold>Search string component</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Hyperscanning</td>
<td valign="top" align="left">(hyperscan<sup>&#x0002A;</sup> OR &#x0201C;social neuroscience&#x0201D; OR &#x0201C;two-person neuroscience&#x0201D; OR interbrain OR interpersonal OR &#x0201C;brain-to-brain interaction&#x0201D; OR interneural OR inter-subject OR synchron<sup>&#x0002A;</sup> OR coupling OR &#x0201C;functional connectivity&#x0201D; OR &#x0201C;effective connectivity&#x0201D;)</td>
</tr>
<tr>
<td valign="top" align="left">Collaboration</td>
<td valign="top" align="left">(team OR &#x0201C;team performance&#x0201D; OR &#x0201C;team engagement&#x0201D; OR &#x0201C;team dynamics&#x0201D; OR &#x0201C;group performance&#x0201D; OR &#x0201C;group dynamics&#x0201D; OR &#x0201C;collaborative engagement&#x0201D; OR &#x0201C;collaborative performance&#x0201D; OR &#x0201C;collective efficacy&#x0201D; OR flow OR &#x0201C;team flow&#x0201D; OR &#x0201C;work flow&#x0201D; OR &#x0201C;group flow&#x0201D; OR &#x0201C;collective flow&#x0201D;)</td>
</tr>
<tr>
<td valign="top" align="left">Recording modalities</td>
<td valign="top" align="left">(fnirs OR &#x0201C;functional near-infrared spectroscopy&#x0201D; OR eeg OR electroencephalogra<sup>&#x0002A;</sup> OR ecg OR electrocardiogra<sup>&#x0002A;</sup> OR ppg OR photoplethysmogra<sup>&#x0002A;</sup> OR eda OR &#x0201C;electrodermal activity&#x0201D; OR &#x0201C;heart rate&#x0201D; OR pulse OR &#x0201C;skin conductance&#x0201D; OR eye-track<sup>&#x0002A;</sup> OR &#x0201C;eye tracking&#x0201D; OR &#x0201C;gaze tracking&#x0201D; OR physiolog<sup>&#x0002A;</sup> OR multimodal<sup>&#x0002A;</sup> OR &#x0201C;multi-modal&#x0201D;)</td>
</tr>
<tr>
<td valign="top" align="left">Digital context</td>
<td valign="top" align="left">(remote OR virtual OR online OR web-based)</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>Search string components reflect the four conceptual domains used in the systematic database search: hyperscanning terminology, collaboration-related constructs, mobile neurophysiological and physiological recording modalities, and digital or remote interaction contexts. Quotation marks indicate exact phrase matching; asterisks denote wildcard operators.</p>
</table-wrap-foot>
</table-wrap>
<p>Following the PRISMA recommendation, we used the PICOS eligibility criteria (Population, Intervention, Comparison, Outcomes, Study design) to assess eligibility (see <xref ref-type="table" rid="T2">Table 2</xref>). For the presented review, we considered only studies with a <italic>population</italic> of at least two clinically healthy adult participants. As an eligible experimental <italic>intervention</italic>, we defined the application of one or more mobile brain- or body-imaging techniques acquiring physiological data, in order to focus on mobile and applied neuroergonomics contexts. We did not limit the search to one specific mobile method, since we aimed to capture how often measurement modalities were applied jointly. Note that we also identified some studies using camera-based tracking, encompassing the assessment of eye contact and facial muscle movements. Although this modality was not explicitly included in the search scope, a specific label was assigned in these cases, given that the modality might be of interest. For the <italic>comparison</italic> criterion, we included studies that scanned at least two participants simultaneously during an interaction. Given the search terms, we expected to find mainly interaction scenarios with group-based collaboration. As <italic>outcomes</italic>, we defined any results addressing inter-subject dynamics as assessed by the measurement modalities of interest. Studies reporting no statistical results related to the physiological measures during the collaborative interactions were not included. Lastly, we included the additional <italic>study design</italic> criterion to limit the review to hyperscanning studies that introduced a collaborative scenario with a focus on a digital aspect in the study design. For example, studies meeting all initial criteria and focusing purely on face-to-face collaboration were excluded. Further, studies whose tasks involved imitation rather than autonomous choices by all collaborators were not included (<xref ref-type="bibr" rid="B10">Bie&#x00144;kiewicz et al., 2021</xref>).</p>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>PICOS eligibility criteria applied during source selection.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>PICOS criterion</bold></th>
<th valign="top" align="left"><bold>Description</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Population</td>
<td valign="top" align="left">Two or more clinically healthy adult participants</td>
</tr>
<tr>
<td valign="top" align="left">Intervention</td>
<td valign="top" align="left">fNIRS, EEG, EDA, ECG, PPG, and eye-tracking are applied to acquire data for analysis</td>
</tr>
<tr>
<td valign="top" align="left">Comparison</td>
<td valign="top" align="left">Scanning two or more participants simultaneously during interaction</td>
</tr>
<tr>
<td valign="top" align="left">Outcomes</td>
<td valign="top" align="left">Inter-subject dynamics analysis</td>
</tr>
<tr>
<td valign="top" align="left">Study design</td>
<td valign="top" align="left">Collaborative scenario with one or more digital interaction media or scenarios</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>ECG, Electrocardiography; EDA, Electrodermal Activity; EEG, Electroencephalography; fNIRS, Functional Near-Infrared Spectroscopy; PPG, Photoplethysmography.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec>
<label>2.2</label>
<title>Duplicate removal</title>
<p>For duplicate removal, the Zotero (version 6.0.36) and Citavi (version 6.15) software were used, complemented by automated matching based on the cosine similarity of titles and abstracts computed with sklearn (Pedregosa et al., <xref ref-type="bibr" rid="B72">2011</xref>). Finally, the web-based review program Rayyan (<xref ref-type="bibr" rid="B68">Ouzzani et al., 2016</xref>) was used for duplicate removal and further screening procedures. All automated duplicate removals were manually confirmed before exclusion.</p>
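<p>For illustration, the following minimal sketch shows how such a title and abstract similarity check can be implemented with sklearn; the function name, variable names, and similarity threshold are hypothetical and do not correspond to the exact script used for this review.</p>
<code language="Python">
# Minimal sketch of automated duplicate flagging via TF-IDF cosine similarity
# (hypothetical names and threshold; flagged pairs are confirmed manually).
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def flag_candidate_duplicates(records, threshold=0.9):
    """records: list of dicts with 'title' and 'abstract' keys; returns likely duplicate index pairs."""
    texts = [f"{r['title']} {r.get('abstract', '')}" for r in records]
    tfidf = TfidfVectorizer(stop_words="english").fit_transform(texts)
    sim = cosine_similarity(tfidf)  # pairwise similarity matrix
    pairs = []
    for i in range(len(records)):
        for j in range(i + 1, len(records)):
            if sim[i, j] > threshold:
                pairs.append((i, j, float(sim[i, j])))
    return pairs
</code>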
</sec>
<sec>
<label>2.3</label>
<title>Screening</title>
<p>The identification procedure followed by duplicate removal resulted in 6,070 sources from the database searches and 120 sources from other methods (see <xref ref-type="fig" rid="F1">Figure 1</xref>).</p>
<sec>
<label>2.3.1</label>
<title>Initial screening</title>
<p>We adopted a liberal inclusion policy at every step of the screening process, i.e., we opted to carry a source one step further in the screening rather than exclude it pre-emptively. Given the large amount of retrieved literature, we first screened titles to exclude search results that were unlikely to meet the inclusion criteria. Titles including words or phrases like &#x0201C;rodents,&#x0201D; &#x0201C;non-human,&#x0201D; &#x0201C;epilepsy,&#x0201D; or &#x0201C;Alzheimer&#x0201D; were excluded, assuming that the Population criterion would not be met. Indications of the use of transcranial magnetic stimulation, transcranial electrical stimulation, or transcutaneous vagus nerve stimulation were another exclusion reason. Furthermore, several titles indicated that the source had no relation to the field of neuroscience in general and instead related to the physics of water flow or aerodynamics. Thereafter, we screened sources for eligibility based on both title and abstract.</p></sec>
<sec>
<label>2.3.2</label>
<title>Retrieval and manuscript screening for eligibility</title>
<p>After the initial title and abstract screening, the full-text versions of sources were retrieved. If full-text versions were not immediately retrievable (e.g., through open access), an effort was made to obtain the full-text version by means of university access rights or by contacting the corresponding author once via email to request access. We successfully retrieved 238 sources derived from the systematic search and an additional 21 from other methods. To screen and label sources for eligibility for the current review, the Rayyan software was used (<xref ref-type="bibr" rid="B68">Ouzzani et al., 2016</xref>). At each review stage, sources were included rather than excluded to decrease the chance of false exclusions; eligibility was then verified with certainty in the final full-text review stage. Screening of the full manuscripts led to the exclusion of an additional 213 sources (for details, see <xref ref-type="fig" rid="F1">Figure 1</xref>). In summary, a total of 45 sources were included in this review. Note that, given that the criteria for <italic>study design</italic> and <italic>comparison</italic> often required in-depth reading of the methods and results sections to ensure thorough screening, many unsuitable sources were initially included in the full-text stage. Hence, those criteria were the most common late-stage exclusion reasons (see <xref ref-type="fig" rid="F1">Figure 1</xref>).</p>
</sec>
</sec>
<sec>
<label>2.4</label>
<title>Extraction of information and categorization</title>
<p>After collecting relevant sources based on the PRISMA criteria, we aimed to extract commonalities by categorizing the studies along 13 dimensions based on design choices, measurement modalities, analysis approaches, and targeted cognitive functions. Category definitions were adapted from existing hyperscanning reviews (e.g., <xref ref-type="bibr" rid="B6">Balters et al., 2020</xref>, <xref ref-type="bibr" rid="B7">2021</xref>; <xref ref-type="bibr" rid="B90">Wang et al., 2018</xref>) or based on influential factors of the sample and design (e.g., how auditory and visual information is exchanged between participants during the interaction). Each included source was individually reviewed and subsequently categorized by extracting the following information: (1) measurement modalities of interest used in the study (e.g., EEG, eye-tracking); (2) the number of participants included in analyses; (3) the pairing configuration of participants (whether measurements were taken simultaneously on dyads, triads, tetrads, or larger groups; <xref ref-type="bibr" rid="B45">Hou et al., 2022</xref>; <xref ref-type="bibr" rid="B71">Park et al., 2023</xref>); the pairing setup of participants, specifically variables known to influence behavior and, subsequently, results, such as (4) gender (<xref ref-type="bibr" rid="B106">Zhang et al., 2023c</xref>) and (5) relationship (<xref ref-type="bibr" rid="B2">Bae et al., 2024</xref>; <xref ref-type="bibr" rid="B24">Dikker et al., 2021</xref>) of simultaneously measured individuals; (6) the type of hyperscanning paradigm employed (see <xref ref-type="table" rid="T3">Table 3</xref>; <xref ref-type="bibr" rid="B9">Barde et al., 2020</xref>; <xref ref-type="bibr" rid="B90">Wang et al., 2018</xref>); (7) the task symmetry (<xref ref-type="bibr" rid="B92">Wikstr&#x000F6;m, 2022</xref>), where highly symmetrical tasks allow participants to assume equal roles (e.g., puzzling with shared pieces), whereas low-symmetry tasks involve distinct roles (e.g., navigator and pilot); (8) the type of communication, defined as either open-ended or goal-driven, and (9) the transfer of information, being either analog, digital, or mixed (<xref ref-type="bibr" rid="B6">Balters et al., 2020</xref>). Following the detailed interaction categories introduced by <xref ref-type="bibr" rid="B7">Balters et al. (2021)</xref> for digital hyperscanning studies, the (10) interaction scenario during the experiment was assessed (i.e., how participants were situated in relation to each other during measurements) and (11) whether the experiment included verbal, physical, and/or digital interaction media in shared or separated manners (for details see <xref ref-type="table" rid="T4">Table 4</xref>; <xref ref-type="bibr" rid="B7">Balters et al., 2021</xref>). Given the diverse measurement modalities included in the present review, we aimed to categorize (12) analysis methods in a signal-agnostic manner, i.e., not tied to specific modalities like EEG or fNIRS (<xref ref-type="bibr" rid="B41">Hakim et al., 2023</xref>). Therefore, we distinguished on a higher level between analysis approaches focused on temporal aspects, spatial distribution, and connectivity domains, as well as machine learning methods (for details, see <xref ref-type="table" rid="T5">Table 5</xref>). Finally, we highlighted (13) the cognitive function of interest primarily investigated in each study&#x00027;s analyses (<xref ref-type="bibr" rid="B7">Balters et al., 2021</xref>). Note that multiple labels within one category may apply to the same study.</p>
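<p>To illustrate the resulting structure, each categorized study can be thought of as a record with one field per dimension. The following sketch uses hypothetical field names and does not reproduce the actual database schema; list-valued fields reflect that multiple labels within one category may apply to the same study.</p>
<code language="Python">
# Illustrative record structure for one categorized study (hypothetical field
# names). Fields mirror the 13 categorization dimensions described above.
from dataclasses import dataclass, field
from typing import List

@dataclass
class StudyEntry:
    modalities: List[str] = field(default_factory=list)            # (1) e.g., ["EEG", "eye-tracking"]
    n_participants: int = 0                                         # (2) participants included in analyses
    group_size: List[str] = field(default_factory=list)             # (3) dyad, triad, tetrad, larger group
    gender_pairing: List[str] = field(default_factory=list)         # (4) e.g., ["ff", "fm", "mm"]
    relationship: List[str] = field(default_factory=list)           # (5) familiar, unfamiliar
    paradigm: List[str] = field(default_factory=list)               # (6) see Table 3
    task_symmetry: str = ""                                         # (7) high or low
    communication_type: str = ""                                    # (8) open-ended or goal-driven
    information_transfer: str = ""                                  # (9) analog, digital, or mixed
    interaction_scenario: List[str] = field(default_factory=list)   # (10) e.g., face-to-face, virtual
    interaction_medium: List[str] = field(default_factory=list)     # (11) see Table 4
    analysis_methods: List[str] = field(default_factory=list)       # (12) see Table 5
    cognitive_functions: List[str] = field(default_factory=list)    # (13) primary cognitive function(s)
</code>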
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p>Paradigm category descriptions.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Paradigm category</bold></th>
<th valign="top" align="left"><bold>Description</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Cooperation and competition tasks</td>
<td valign="top" align="left">Participants either collaborate toward a shared goal or compete against one another</td>
</tr>
<tr>
<td valign="top" align="left">Coordination tasks</td>
<td valign="top" align="left">Participants perform a task that requires actions to be coordinated in time across partners</td>
</tr>
<tr>
<td valign="top" align="left">Ecologically valid scenarios</td>
<td valign="top" align="left">Participants are placed in real-world interaction contexts while under neuroimaging</td>
</tr>
<tr>
<td valign="top" align="left">Economic exchange tasks</td>
<td valign="top" align="left">Participants exchange a type of currency (either real or artificially constructed for the experiment)</td>
</tr>
<tr>
<td valign="top" align="left">Eye contact/gaze-based tasks</td>
<td valign="top" align="left">Participants look at each other and/or follow the gaze of another</td>
</tr>
<tr>
<td valign="top" align="left">Imitation tasks</td>
<td valign="top" align="left">Participants imitate the other&#x00027;s movement or behavior</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>The paradigm categories were originally derived by <xref ref-type="bibr" rid="B90">Wang et al. (2018)</xref>. Note that imitation and economic exchange tasks were included to provide a complete overview of categories, although none of the included studies in this review fall within those categories.</p>
</table-wrap-foot>
</table-wrap>
<table-wrap position="float" id="T4">
<label>Table 4</label>
<caption><p>Categories of interaction media.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Interaction medium</bold></th>
<th valign="top" align="left"><bold>Description</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Shared physical IM and verbal IM</td>
<td valign="top" align="left">Participants share physical objects and verbally communicate while interacting with the objects</td>
</tr>
<tr>
<td valign="top" align="left">Physical IM w/out verbal IM</td>
<td valign="top" align="left">Participants interact with non-shared tangible interfaces or musical instruments without verbal communication</td>
</tr>
<tr>
<td valign="top" align="left">Verbal IM</td>
<td valign="top" align="left">Participants solely interact verbally without any physical or non-verbal communication</td>
</tr>
<tr>
<td valign="top" align="left">Non-verbal IM</td>
<td valign="top" align="left">Participants solely interact non-verbally, such as looking at one another or synchronizing limb movements while observing one another</td>
</tr>
<tr>
<td valign="top" align="left">Shared digital IM and verbal IM</td>
<td valign="top" align="left">Participants interact together on one shared computer screen while also engaging in verbal interaction</td>
</tr>
<tr>
<td valign="top" align="left">Separate digital IM and verbal IM</td>
<td valign="top" align="left">Participants interact together on separate computer screens while also engaging in verbal interaction</td>
</tr>
<tr>
<td valign="top" align="left">Shared digital IM w/out verbal IM</td>
<td valign="top" align="left">Participants interact together on one shared computer screen without verbally communicating.</td>
</tr>
<tr>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">Participants interact on separate digital task media without interacting verbally</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>The interaction medium categories were derived from <xref ref-type="bibr" rid="B7">Balters et al. (2021)</xref>, who defined them to cluster fNIRS hyperscanning research involving digital components. Note that the category was originally termed &#x0201C;interaction manipulative.&#x0201D;</p>
</table-wrap-foot>
</table-wrap>
<table-wrap position="float" id="T5">
<label>Table 5</label>
<caption><p>Categories of multimodal analysis methods.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Analysis domain</bold></th>
<th valign="top" align="left"><bold>Approach</bold></th>
<th valign="top" align="left"><bold>Description</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" rowspan="2">Connectivity</td>
<td valign="top" align="left">Functional</td>
<td valign="top" align="left">Quantifies undirected statistical associations (e.g., correlation, synchrony) between signals from two or more individuals. Applicable to any time-series data</td>
</tr>
 <tr>
<td/>
<td valign="top" align="left">Effective</td>
<td valign="top" align="left">Assesses directed influence or causal relationships between signals across individuals (e.g., Granger causality, transfer entropy, or directional mutual information). Applicable to any time-series data</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="2">Temporal domain</td>
<td valign="top" align="left">Time-based</td>
<td valign="top" align="left">Analyzes time-aligned patterns in signals, capturing moment-to-moment fluctuations (e.g., alignment in gaze, HRV bursts, EEG amplitude). Applicable to signals with uniform time alignment</td>
</tr>
 <tr>
<td/>
<td valign="top" align="left">Frequency-based</td>
<td valign="top" align="left">Decomposes signals into frequency components to analyze rhythmic coupling (e.g., alpha-band EEG, cardiac-respiratory oscillations). Applicable to oscillatory modalities</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="2">Spatial domain</td>
<td valign="top" align="left">Sensor-level</td>
<td valign="top" align="left">Uses data at the measurement site (e.g., EEG electrode, fNIRS optode), enabling cross-participant topographic comparisons or coupling at sensor locations. Applicable to spatially distributed signals</td>
</tr>
 <tr>
<td/>
<td valign="top" align="left">Source-level</td>
<td valign="top" align="left">Projects sensor-level data into source space (e.g., cortical regions or gaze location on screen), allowing spatially informed inter-individual comparisons. Applicable to spatially distributed signals</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="2">Machine learning</td>
<td valign="top" align="left">Supervised</td>
<td valign="top" align="left">Learns mappings from multimodal signal features to predefined labels (e.g., task success, social outcome) across dyads or groups, enabling prediction or classification of interaction dynamics. Applicable to any signal modality</td>
</tr>
 <tr>
<td/>
<td valign="top" align="left">Unsupervised</td>
<td valign="top" align="left">Discovers latent patterns, clusters, or dimensions in joint participant data without labels (e.g., synchronized state clusters, engagement modes). Applicable to high-dimensional or multivariate data of any sensor modality</td>
</tr>
<tr>
<td valign="top" align="left">Other</td>
<td/>
<td valign="top" align="left">Grouping label of any analysis method that does not match the above categories</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>The inclusion of various measurement modalities requires broad category definitions for analysis methods employed in studies.</p>
</table-wrap-foot>
</table-wrap>
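<p>As an illustration of the most frequently used analysis domain, undirected functional connectivity can be estimated from any pair of time-aligned signals. The following generic sketch computes windowed Pearson correlations between two participants&#x00027; signals; it is one simple instance of this category and not a method prescribed by any of the included studies.</p>
<code language="Python">
# Generic sketch of an undirected functional-connectivity estimate between two
# participants' equally sampled time series (e.g., EEG band power, heart rate).
import numpy as np

def windowed_correlation(signal_a, signal_b, win=256, step=128):
    """Return Pearson correlations per sliding window over two equal-length 1D arrays."""
    signal_a = np.asarray(signal_a, dtype=float)
    signal_b = np.asarray(signal_b, dtype=float)
    corrs = []
    for start in range(0, len(signal_a) - win + 1, step):
        a = signal_a[start:start + win]
        b = signal_b[start:start + win]
        corrs.append(np.corrcoef(a, b)[0, 1])  # correlation within this window
    return np.array(corrs)
</code>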
<sec>
<label>2.4.1</label>
<title>Living literature review database &#x0201C;InterBrainDB&#x0201D;</title>
<p>Building on the initial review, a continuously updated &#x0201C;living&#x0201D; literature review was launched and maintained to track emerging research on multimodal hyperscanning (see <xref ref-type="fig" rid="F2">Figure 2</xref>). The app utilizes the categories described in the previous section and includes an interactive visualization tool, serving as a dynamic, open-access resource. To build the interactive platform, the Python library <italic>streamlit</italic> (version 1.45.1; <xref ref-type="bibr" rid="B86">Streamlit Inc., 2025</xref>) was used; the app supports the browsers Google Chrome, Firefox, Microsoft Edge, and Safari. The database allows filtering by sample population, interaction design, modality, analysis method, and cognitive domain, and will be expanded to incorporate additional modalities and future publications. Thus, the purpose of the database is two-fold: (1) it is intended to supplement the present review by allowing for replication of results and figures, as well as keeping the present analyses up-to-date by integrating newly published, relevant literature; (2) it is intended as a tool for the scientific community to search for literature based on the introduced categories and to be extended to include hyperscanning studies with digital components beyond the presented PICOS criteria. To achieve this long-term value, sources submitted by the scientific community via the app&#x00027;s anonymous submission form are reviewed and categorized by the first author on a monthly basis. Moreover, users may propose a categorization when submitting a source, and changes to existing labels can be requested by contacting the first author. The server-hosted living review can be found here: <ext-link ext-link-type="uri" xlink:href="https://websites.fraunhofer.de/interbraindb">https://websites.fraunhofer.de/interbraindb</ext-link>, with source code hosted at <ext-link ext-link-type="uri" xlink:href="https://github.com/acv132/InterBrainDB">https://github.com/acv132/InterBrainDB</ext-link>. So far, 106 studies have been categorized and included in the online database. The database comprises the studies included in this review paper, as indicated within the database itself. Furthermore, hyperscanning studies not strictly meeting the presented PICOS criteria were included and categorized, such as studies with underage sample populations, non-mobile imaging (e.g., fMRI), or paradigms beyond digital collaboration (e.g., face-to-face collaboration). When initially opening the app, the default filter configuration includes only sources reported in this paper, allowing for easy replication of results and interactive figures. By customizing filters in the sidebar, the database enables users to perform analyses according to their preferences and to download figures, reference tables, and BibTeX files.</p>
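<p>As a minimal sketch of the filtering logic underlying such an app, the example below loads a table of categorized studies and exposes sidebar filters; the file name, column names, and filter labels are hypothetical and do not reproduce the actual app code, which is available in the repository linked above.</p>
<code language="Python">
# Minimal streamlit sketch of a filterable study database (hypothetical file and
# column names; the actual InterBrainDB implementation may differ).
import pandas as pd
import streamlit as st

df = pd.read_csv("studies.csv")  # one row per categorized study

st.sidebar.header("Filters")
modalities = st.sidebar.multiselect("Measurement modality", sorted(df["modality"].unique()))
paradigms = st.sidebar.multiselect("Paradigm", sorted(df["paradigm"].unique()))

filtered = df.copy()
if modalities:
    filtered = filtered[filtered["modality"].isin(modalities)]
if paradigms:
    filtered = filtered[filtered["paradigm"].isin(paradigms)]

st.write(f"{len(filtered)} studies match the current filters")
st.dataframe(filtered)
st.download_button("Download as CSV", filtered.to_csv(index=False), "interbraindb_filtered.csv")
</code>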
<fig position="float" id="F2">
<label>Figure 2</label>
<caption><p>Screenshot of living literature review platform. A screenshot of the platform hosting the living literature review built using the Python library Streamlit (version 1.45.1).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnrgo-07-1756956-g0002.tif">
</graphic>
</fig>
</sec>
</sec>
</sec>
<sec sec-type="results" id="s3">
<label>3</label>
<title>Results</title>
<sec>
<label>3.1</label>
<title>Study selection</title>
<p>An overview of the 45 included studies can be found in <xref ref-type="table" rid="T6">Table 6</xref>. The earliest studies included were published in the mid-2010s (see <xref ref-type="fig" rid="F3">Figure 3</xref>). The average sample size across studies was 55.8 &#x000B1; 70.3 (mean &#x000B1; SD; SEM = 10.48). The sample sizes varied considerably, from small-scale studies with four participants (<xref ref-type="bibr" rid="B74">P&#x000F6;ys&#x000E4;-Tarhonen et al., 2021</xref>) to large-scale studies with 480 participants (<xref ref-type="bibr" rid="B104">Zhang et al., 2023b</xref>). Most studies used within-subjects designs (<italic>n</italic> = 33), some analyzed between-subjects effects (<italic>n</italic> = 10), and two studies had a mixed design (<xref ref-type="bibr" rid="B30">Fang et al., 2022</xref>; <xref ref-type="bibr" rid="B99">Yamaya et al., 2025</xref>). This distribution might be attributed to the fact that achieving sufficient statistical power with a between-subjects design requires a substantially larger recruitment effort in hyperscanning studies.</p>
<table-wrap position="float" id="T6">
<label>Table 6</label>
<caption><p>Overview of studies.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>References</bold></th>
<th valign="top" align="left"><bold>Measurement modalities</bold></th>
<th valign="top" align="left"><bold>Participants; group size, gender of pairs [fm, ff, mm], familiarity</bold></th>
<th valign="top" align="left"><bold>Paradigm</bold></th>
<th valign="top" align="left"><bold>Task symmetry</bold></th>
<th valign="top" align="left"><bold>Type of communication</bold></th>
<th valign="top" align="left"><bold>Transfer of information</bold></th>
<th valign="top" align="left"><bold>Interaction scenario</bold></th>
<th valign="top" align="left"><bold>Interaction medium</bold></th>
<th valign="top" align="left"><bold>Type of analysis</bold></th>
<th valign="top" align="left"><bold>Cognitive function</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" colspan="11"><bold>Multimodal measurements</bold></td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B5">Balconi et al. 2022</xref>)</td>
<td valign="top" align="left">[&#x0201C;ECG,&#x0201D; &#x0201C;EDA,&#x0201D; &#x0201C;EEG&#x0201D;]</td>
<td valign="top" align="left"><italic>N</italic> = 20; dyad, n/s, [&#x0201C;unfamiliar,&#x0201D; &#x0201C;instructor-student&#x0201D;]</td>
<td valign="top" align="left">Ecologically valid setting</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">Open-ended</td>
<td valign="top" align="left">Mixed</td>
<td valign="top" align="left">[&#x0201C;FTF,&#x0201D; &#x0201C;virtual&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Verbal IM,&#x0201D; &#x0201C;separate digital IM and verbal IM&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Connectivity functional,&#x0201D; &#x0201C;temporal domain frequency,&#x0201D; &#x0201C;spatial domain source&#x0201D;]</td>
<td valign="top" align="left">Social cognition</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B15">Chuang and Hsu 2023</xref>)</td>
<td valign="top" align="left">[&#x0201C;EEG,&#x0201D; &#x0201C;eye-tracking&#x0201D;]</td>
<td valign="top" align="left"><italic>N</italic> = 58; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;fm,&#x0201D; &#x0201C;mm&#x0201D;], n/s</td>
<td valign="top" align="left">[&#x0201C;Eye-contact/gaze-based tasks,&#x0201D; &#x0201C;cooperation/competition tasks&#x0201D;]</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">[&#x0201C;FTF v-b,&#x0201D; &#x0201C;virtual&#x0201D;]</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Connectivity functional,&#x0201D; &#x0201C;temporal domain time,&#x0201D; &#x0201C;temporal domain frequency,&#x0201D; &#x0201C;spatial domain sensor&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Attention,&#x0201D; &#x0201C;visuospatial cognition&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B39">Gugnowska et al. 2022</xref>)</td>
<td valign="top" align="left">[&#x0201C;EEG,&#x0201D; &#x0201C;camera-based tracking&#x0201D;]</td>
<td valign="top" align="left"><italic>N</italic> = 28; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;fm,&#x0201D; &#x0201C;mm&#x0201D;], unfamiliar</td>
<td valign="top" align="left">Ecologically valid setting</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">Open-ended</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">Virtual</td>
<td valign="top" align="left">Physical IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Spatial domain sensor,&#x0201D; &#x0201C;connectivity functional,&#x0201D; &#x0201C;temporal domain frequency&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Motor,&#x0201D; &#x0201C;other&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B65">Numata et al. 2021</xref>)</td>
<td valign="top" align="left">[&#x0201C;ECG,&#x0201D; &#x0201C;EDA,&#x0201D; &#x0201C;other&#x0201D;]</td>
<td valign="top" align="left"><italic>N</italic> = 9; dyad, mm, familiar</td>
<td valign="top" align="left">Ecologically valid setting</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">Mixed</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">Virtual</td>
<td valign="top" align="left">Separate digital IM and verbal IM</td>
<td valign="top" align="left">Temporal domain time</td>
<td valign="top" align="left">[&#x0201C;Memory,&#x0201D; &#x0201C;executive function&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B81">Shih et al. 2024</xref>)</td>
<td valign="top" align="left">[&#x0201C;fNIRS,&#x0201D; &#x0201C;camera-based tracking&#x0201D;]</td>
<td valign="top" align="left"><italic>N</italic> = 6; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;fm,&#x0201D; &#x0201C;mm&#x0201D;], familiar</td>
<td valign="top" align="left">Ecologically valid setting</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Open-ended</td>
<td valign="top" align="left">Mixed</td>
<td valign="top" align="left">[&#x0201C;SBS,&#x0201D; &#x0201C;virtual&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Separate digital IM and verbal IM,&#x0201D; &#x0201C;separate digital IM w/out verbal IM&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Temporal domain time,&#x0201D; &#x0201C;connectivity functional,&#x0201D; &#x0201C;spatial domain sensor&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Other,&#x0201D; &#x0201C;executive function&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B82">Snijdewint and Scheepers 2023</xref>)</td>
<td valign="top" align="left">[&#x0201C;ECG,&#x0201D; &#x0201C;other&#x0201D;]</td>
<td valign="top" align="left"><italic>N</italic> = 117; triad, [&#x0201C;ff,&#x0201D; &#x0201C;fm,&#x0201D; &#x0201C;mm&#x0201D;], [&#x0201C;familiar,&#x0201D; &#x0201C;unfamiliar&#x0201D;]</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">[&#x0201C;FTF,&#x0201D; &#x0201C;SBS v-b&#x0201D;]</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Temporal domain time,&#x0201D; &#x0201C;connectivity functional&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Attention,&#x0201D; &#x0201C;executive function&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B91">Wang et al. 2024</xref>)</td>
<td valign="top" align="left">[&#x0201C;EEG,&#x0201D; &#x0201C;eye-tracking&#x0201D;]</td>
<td valign="top" align="left"><italic>N</italic> = 10; dyad, n/s, n/s</td>
<td valign="top" align="left">Ecologically valid setting</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Open-ended</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">Virtual</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Temporal domain time,&#x0201D; &#x0201C;temporal domain frequency,&#x0201D; &#x0201C;connectivity functional&#x0201D;]</td>
<td valign="top" align="left">Executive function</td>
</tr>
<tr>
<td valign="top" align="left" colspan="11"><bold>EEG</bold></td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B1">Astolfi et al. 2020</xref>)</td>
<td valign="top" align="left">EEG</td>
<td valign="top" align="left"><italic>N</italic> = 32; dyad, mm, unfamiliar</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">FTF v-b</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Connectivity effective,&#x0201D; &#x0201C;temporal domain frequency,&#x0201D; &#x0201C;spatial domain sensor,&#x0201D; &#x0201C;machine learning supervised,&#x0201D; &#x0201C;other&#x0201D;]</td>
<td valign="top" align="left">Visuospatial cognition</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B4">Balconi et al. 2023</xref>)</td>
<td valign="top" align="left">EEG</td>
<td valign="top" align="left"><italic>N</italic> = 8; more (n_group = 8), n/s, instructor-student</td>
<td valign="top" align="left">Ecologically valid setting</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Open-ended</td>
<td valign="top" align="left">Mixed</td>
<td valign="top" align="left">[&#x0201C;FTF,&#x0201D; &#x0201C;virtual&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Shared physical IM and verbal IM,&#x0201D; &#x0201C;separate digital IM and verbal IM&#x0201D;]</td>
<td valign="top" align="left">Temporal domain frequency</td>
<td valign="top" align="left">[&#x0201C;Social cognition,&#x0201D; &#x0201C;executive function&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B16">Chuang et al. 2024</xref>)</td>
<td valign="top" align="left">EEG</td>
<td valign="top" align="left"><italic>N</italic> = 58; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;fm,&#x0201D; &#x0201C;mm&#x0201D;], n/s</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">Virtual</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Connectivity functional,&#x0201D; &#x0201C;temporal domain time,&#x0201D; &#x0201C;temporal domain frequency&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Memory,&#x0201D; &#x0201C;visuospatial cognition&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B17">Ciaramidaro et al. 2024</xref>)</td>
<td valign="top" align="left">EEG</td>
<td valign="top" align="left"><italic>N</italic> = 32; dyad, mm, n/s</td>
<td valign="top" align="left">Coordination tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">SBS</td>
<td valign="top" align="left">Shared digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Spatial domain source,&#x0201D; &#x0201C;connectivity effective,&#x0201D; &#x0201C;temporal domain frequency&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Motor,&#x0201D; &#x0201C;visuospatial cognition&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B19">Cross et al. 2022</xref>)</td>
<td valign="top" align="left">EEG</td>
<td valign="top" align="left"><italic>N</italic> = 40; dyad, [&#x0201C;fm,&#x0201D; &#x0201C;mm&#x0201D;], n/s</td>
<td valign="top" align="left">Ecologically valid setting</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Mixed</td>
<td valign="top" align="left">[&#x0201C;SBS,&#x0201D; &#x0201C;virtual&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Shared physical IM and verbal IM,&#x0201D; &#x0201C;separate digital IM and verbal IM&#x0201D;]</td>
<td valign="top" align="left">Temporal domain frequency</td>
<td valign="top" align="left">[&#x0201C;Motor,&#x0201D; &#x0201C;executive function&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B27">Du et al. 2022</xref>)</td>
<td valign="top" align="left">EEG</td>
<td valign="top" align="left"><italic>N</italic> = 36; triad, n/s, familiar</td>
<td valign="top" align="left">[&#x0201C;Ecologically valid setting,&#x0201D; &#x0201C;cooperation/competition tasks&#x0201D;]</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Mixed</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">Virtual</td>
<td valign="top" align="left">Separate digital IM and verbal IM</td>
<td valign="top" align="left">[&#x0201C;Connectivity functional,&#x0201D; &#x0201C;temporal domain frequency&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Language,&#x0201D; &#x0201C;attention,&#x0201D; &#x0201C;executive function&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B33">Fl&#x000F6;sch et al. 2024a</xref>)</td>
<td valign="top" align="left">EEG</td>
<td valign="top" align="left"><italic>N</italic> = 32; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;mm&#x0201D;], familiar</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">Virtual</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Spatial domain sensor,&#x0201D; &#x0201C;temporal domain frequency&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Memory,&#x0201D; &#x0201C;language,&#x0201D; &#x0201C;social cognition,&#x0201D; &#x0201C;executive function&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B34">Fl&#x000F6;sch et al. 2024b</xref>)</td>
<td valign="top" align="left">EEG</td>
<td valign="top" align="left"><italic>N</italic> = 24; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;mm&#x0201D;], familiar</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">Virtual</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">Temporal domain time</td>
<td valign="top" align="left">[&#x0201C;Memory,&#x0201D; &#x0201C;language,&#x0201D; &#x0201C;social cognition,&#x0201D; &#x0201C;executive function&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B40">Gumilar et al. 2021</xref>)</td>
<td valign="top" align="left">EEG</td>
<td valign="top" align="left"><italic>N</italic> = 24; dyad, n/s, unfamiliar</td>
<td valign="top" align="left">Coordination tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Mixed</td>
<td valign="top" align="left">Virtual</td>
<td valign="top" align="left">[&#x0201C;Non-verbal IM,&#x0201D; &#x0201C;shared digital IM w/out verbal IM&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Spatial domain source,&#x0201D; &#x0201C;connectivity functional,&#x0201D; &#x0201C;temporal domain frequency&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Motor,&#x0201D; &#x0201C;visuospatial cognition&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B42">Hayati et al. 2025</xref>)</td>
<td valign="top" align="left">EEG</td>
<td valign="top" align="left"><italic>N</italic> = 28; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;fm,&#x0201D; &#x0201C;mm&#x0201D;], familiar</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Mixed</td>
<td valign="top" align="left">[&#x0201C;SBS,&#x0201D; &#x0201C;virtual&#x0201D;]</td>
<td valign="top" align="left">Shared digital IM and verbal IM</td>
<td valign="top" align="left">[&#x0201C;Connectivity functional,&#x0201D; &#x0201C;spatial domain sensor&#x0201D;]</td>
<td valign="top" align="left">Visuospatial cognition</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B55">Liu et al. 2024</xref>)</td>
<td valign="top" align="left">EEG</td>
<td valign="top" align="left"><italic>N</italic> = 104; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;mm&#x0201D;], unfamiliar</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">SBS v-b</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Temporal domain frequency,&#x0201D; &#x0201C;connectivity functional,&#x0201D; &#x0201C;spatial domain source&#x0201D;]</td>
<td valign="top" align="left">Social cognition</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B51">L&#x000E9;n&#x000E9; et al. 2021</xref>)</td>
<td valign="top" align="left">EEG</td>
<td valign="top" align="left"><italic>N</italic> = 46; dyad, n/s, unfamiliar</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">SBS v-b</td>
<td valign="top" align="left">Shared digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Connectivity functional,&#x0201D; &#x0201C;temporal domain frequency&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Attention,&#x0201D; &#x0201C;executive function&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B94">Wikstr&#x000F6;m et al. 2022</xref>)</td>
<td valign="top" align="left">EEG</td>
<td valign="top" align="left"><italic>N</italic> = 42; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;fm,&#x0201D; &#x0201C;mm&#x0201D;], familiar</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">Virtual</td>
<td valign="top" align="left">Shared digital IM w/out verbal IM</td>
<td valign="top" align="left">n/s</td>
<td valign="top" align="left">[&#x0201C;Motor,&#x0201D; &#x0201C;attention,&#x0201D; &#x0201C;executive function,&#x0201D; &#x0201C;visuospatial cognition&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B102">Zhang et al. 2019</xref>)</td>
<td valign="top" align="left">EEG</td>
<td valign="top" align="left"><italic>N</italic> = 74; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;mm&#x0201D;], unfamiliar</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">FTF v-b</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Connectivity functional,&#x0201D; &#x0201C;temporal domain time,&#x0201D; &#x0201C;spatial domain sensor,&#x0201D; &#x0201C;spatial domain source&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Social cognition,&#x0201D; &#x0201C;executive function&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B108">Zhou et al. 2021</xref>)</td>
<td valign="top" align="left">EEG</td>
<td valign="top" align="left"><italic>N</italic> = 60; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;mm&#x0201D;], n/s</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">SBS v-b</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Connectivity functional,&#x0201D; &#x0201C;machine learning supervised&#x0201D;]</td>
<td valign="top" align="left">Motor</td>
</tr>
<tr>
<td valign="top" align="left" colspan="11"><bold>fNIRS</bold></td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B8">Balters et al. 2023</xref>)</td>
<td valign="top" align="left">fNIRS</td>
<td valign="top" align="left"><italic>N</italic> = 72; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;fm,&#x0201D; &#x0201C;mm&#x0201D;], unfamiliar</td>
<td valign="top" align="left">Ecologically valid setting</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">[&#x0201C;Open-ended,&#x0201D; &#x0201C;goal-driven&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Analog,&#x0201D; &#x0201C;digital&#x0201D;]</td>
<td valign="top" align="left">Virtual</td>
<td valign="top" align="left">Verbal IM</td>
<td valign="top" align="left">[&#x0201C;Connectivity functional,&#x0201D; &#x0201C;machine learning unsupervised,&#x0201D; &#x0201C;spatial domain sensor&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Social cognition,&#x0201D; &#x0201C;executive function&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B14">Cheng et al. 2019</xref>)</td>
<td valign="top" align="left">fNIRS</td>
<td valign="top" align="left"><italic>N</italic> = 62; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;fm&#x0201D;], unfamiliar</td>
<td valign="top" align="left">Coordination tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">FTF v-b</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Connectivity effective,&#x0201D; &#x0201C;connectivity functional&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Motor,&#x0201D; &#x0201C;visuospatial cognition&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B43">Hayne et al. 2023</xref>)</td>
<td valign="top" align="left">fNIRS</td>
<td valign="top" align="left"><italic>N</italic> = 84; dyad, fm, unfamiliar</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">Virtual</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Spatial domain source,&#x0201D; &#x0201C;machine learning supervised&#x0201D;]</td>
<td valign="top" align="left">Social cognition</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B46">Hu et al. 2017</xref>)</td>
<td valign="top" align="left">fNIRS</td>
<td valign="top" align="left"><italic>N</italic> = 70; dyad, ff, unfamiliar</td>
<td valign="top" align="left">Coordination tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">FTF v-b</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Spatial domain sensor,&#x0201D; &#x0201C;connectivity functional&#x0201D;]</td>
<td valign="top" align="left">Social cognition</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B54">Liu et al. 2019</xref>)</td>
<td valign="top" align="left">fNIRS</td>
<td valign="top" align="left"><italic>N</italic> = 42; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;mm&#x0201D;], instructor-student</td>
<td valign="top" align="left">Ecologically valid setting</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">Open-ended</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">[&#x0201C;FTF,&#x0201D; &#x0201C;virtual&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Non-verbal IM,&#x0201D; &#x0201C;separate digital IM w/out verbal IM&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Temporal domain time,&#x0201D; &#x0201C;connectivity functional,&#x0201D; &#x0201C;spatial domain sensor&#x0201D;]</td>
<td valign="top" align="left">Social cognition</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B56">Lu et al. 2020</xref>)</td>
<td valign="top" align="left">fNIRS</td>
<td valign="top" align="left"><italic>N</italic> = 54; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;mm&#x0201D;], unfamiliar</td>
<td valign="top" align="left">Ecologically valid setting</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Open-ended</td>
<td valign="top" align="left">Mixed</td>
<td valign="top" align="left">[&#x0201C;FTF,&#x0201D; &#x0201C;virtual&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Verbal IM,&#x0201D; &#x0201C;separate digital IM w/out verbal IM&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Temporal domain frequency,&#x0201D; &#x0201C;connectivity functional,&#x0201D; &#x0201C;spatial domain sensor&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Language,&#x0201D; &#x0201C;social cognition&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B70">Pan et al. 2017</xref>)</td>
<td valign="top" align="left">fNIRS</td>
<td valign="top" align="left"><italic>N</italic> = 98; dyad, fm, [&#x0201C;familiar,&#x0201D; &#x0201C;unfamiliar&#x0201D;]</td>
<td valign="top" align="left">Coordination tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">SBS v-b</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Temporal domain frequency,&#x0201D; &#x0201C;connectivity functional,&#x0201D; &#x0201C;connectivity effective&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Motor,&#x0201D; &#x0201C;social cognition&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B97">Wu et al. 2025</xref>)</td>
<td valign="top" align="left">fNIRS</td>
<td valign="top" align="left"><italic>N</italic> = 72; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;fm,&#x0201D; &#x0201C;mm&#x0201D;], unfamiliar</td>
<td valign="top" align="left">Ecologically valid setting</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Open-ended</td>
<td valign="top" align="left">Mixed</td>
<td valign="top" align="left">[&#x0201C;FTF,&#x0201D; &#x0201C;SBS v-b,&#x0201D; &#x0201C;virtual&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Shared physical IM and verbal IM,&#x0201D; &#x0201C;separate digital IM w/out verbal IM,&#x0201D; &#x0201C;separate digital IM w/out verbal IM&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Temporal domain time,&#x0201D; &#x0201C;spatial domain sensor,&#x0201D; &#x0201C;connectivity functional,&#x0201D; &#x0201C;temporal domain frequency&#x0201D;]</td>
<td valign="top" align="left">Other</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B99">Yamaya et al. 2025</xref>)</td>
<td valign="top" align="left">fNIRS</td>
<td valign="top" align="left"><italic>N</italic> = 60; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;mm&#x0201D;], familiar</td>
<td valign="top" align="left">Coordination tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Mixed</td>
<td valign="top" align="left">[&#x0201C;FTF,&#x0201D; &#x0201C;virtual&#x0201D;]</td>
<td valign="top" align="left">Verbal IM</td>
<td valign="top" align="left">Spatial domain source</td>
<td valign="top" align="left">Language</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B104">Zhang et al. 2023b</xref>)</td>
<td valign="top" align="left">fNIRS</td>
<td valign="top" align="left"><italic>N</italic> = 480; more (n_group = 6), [&#x0201C;ff,&#x0201D; &#x0201C;fm,&#x0201D; &#x0201C;mm&#x0201D;], unfamiliar</td>
<td valign="top" align="left">[&#x0201C;Economic exchange tasks,&#x0201D; &#x0201C;cooperation/competition tasks&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;High,&#x0201D; &#x0201C;low&#x0201D;]</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">Virtual</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Connectivity functional,&#x0201D; &#x0201C;temporal domain frequency,&#x0201D; &#x0201C;spatial domain source&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Social cognition,&#x0201D; &#x0201C;executive function&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B103">Zhang et al. 2023a</xref>)</td>
<td valign="top" align="left">fNIRS</td>
<td valign="top" align="left"><italic>N</italic> = 84; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;mm&#x0201D;], unfamiliar</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Mixed</td>
<td valign="top" align="left">[&#x0201C;SBS,&#x0201D; &#x0201C;SBS v-b&#x0201D;]</td>
<td valign="top" align="left">Shared digital IM and verbal IM</td>
<td valign="top" align="left">[&#x0201C;Connectivity functional,&#x0201D; &#x0201C;spatial domain sensor,&#x0201D; &#x0201C;machine learning supervised&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Memory,&#x0201D; &#x0201C;language&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left" colspan="11"><bold>Eye-tracking</bold></td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B13">Cheng et al. 2022</xref>)</td>
<td valign="top" align="left">Eye-tracking</td>
<td valign="top" align="left"><italic>N</italic> = 32; dyad, n/s, n/s</td>
<td valign="top" align="left">Ecologically valid setting</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">Virtual</td>
<td valign="top" align="left">Separate digital IM and verbal IM</td>
<td valign="top" align="left">Other</td>
<td valign="top" align="left">[&#x0201C;Attention,&#x0201D; &#x0201C;social cognition,&#x0201D; &#x0201C;executive function,&#x0201D; &#x0201C;visuospatial cognition&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B32">Findik-Co&#x0015F;kun&#x000E7;ay and &#x000C7;akir 2022</xref>)</td>
<td valign="top" align="left">Eye-tracking</td>
<td valign="top" align="left"><italic>N</italic> = 27; triad, n/s, n/s</td>
<td valign="top" align="left">Ecologically valid setting</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">Virtual</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Temporal domain time,&#x0201D; &#x0201C;connectivity functional&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Attention,&#x0201D; &#x0201C;social cognition,&#x0201D; &#x0201C;executive function&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B44">Hoffmann et al. 2024</xref>)</td>
<td valign="top" align="left">Eye-tracking</td>
<td valign="top" align="left"><italic>N</italic> = 74; dyad, n/s, familiar</td>
<td valign="top" align="left">Ecologically valid setting</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">SBS v-b</td>
<td valign="top" align="left">Separate digital IM and verbal IM</td>
<td valign="top" align="left">Connectivity functional</td>
<td valign="top" align="left">Attention</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B48">K&#x000FC;tt et al. 2019</xref>)</td>
<td valign="top" align="left">Eye-tracking</td>
<td valign="top" align="left"><italic>N</italic> = 40; dyad, n/s, [&#x0201C;familiar,&#x0201D; &#x0201C;unfamiliar&#x0201D;]</td>
<td valign="top" align="left">Ecologically valid setting</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Mixed</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">BTB</td>
<td valign="top" align="left">Separate digital IM and verbal IM</td>
<td valign="top" align="left">Other</td>
<td valign="top" align="left">[&#x0201C;Attention,&#x0201D; &#x0201C;language&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B74">P&#x000F6;ys&#x000E4;-Tarhonen et al. 2021</xref>)</td>
<td valign="top" align="left">Eye-tracking</td>
<td valign="top" align="left"><italic>N</italic> = 4; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;mm&#x0201D;], familiar</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">Virtual</td>
<td valign="top" align="left">Separate digital IM and verbal IM</td>
<td valign="top" align="left">Other</td>
<td valign="top" align="left">[&#x0201C;Attention,&#x0201D; &#x0201C;social cognition,&#x0201D; &#x0201C;executive function&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B95">Wisiecka et al. 2023</xref>)</td>
<td valign="top" align="left">Eye-tracking</td>
<td valign="top" align="left"><italic>N</italic> = 54; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;mm&#x0201D;], n/s</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">[&#x0201C;SBS,&#x0201D; &#x0201C;virtual&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Shared digital IM and verbal IM,&#x0201D; &#x0201C;separate digital IM and verbal IM&#x0201D;]</td>
<td valign="top" align="left">Other</td>
<td valign="top" align="left">[&#x0201C;Social cognition,&#x0201D; &#x0201C;executive function&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B84">&#x00160;pakov et al. 2019</xref>)</td>
<td valign="top" align="left">Eye-tracking</td>
<td valign="top" align="left"><italic>N</italic> = 40; dyad, fm, [&#x0201C;familiar,&#x0201D; &#x0201C;unfamiliar&#x0201D;]</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">[&#x0201C;FTF v-b,&#x0201D; &#x0201C;virtual&#x0201D;]</td>
<td valign="top" align="left">Separate digital IM and verbal IM</td>
<td valign="top" align="left">Other</td>
<td valign="top" align="left">[&#x0201C;Attention,&#x0201D; &#x0201C;social cognition,&#x0201D; &#x0201C;executive function,&#x0201D; &#x0201C;visuospatial cognition&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left" colspan="11"><bold>Physiological data (ECG, EDA, EMG, other)</bold></td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B30">Fang et al. 2022</xref>)</td>
<td valign="top" align="left">ECG</td>
<td valign="top" align="left"><italic>N</italic> = 24; more (n_group = 6), fm, n/s</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">Mixed</td>
<td valign="top" align="left">Mixed</td>
<td valign="top" align="left">[&#x0201C;FTF,&#x0201D; &#x0201C;virtual&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Verbal IM,&#x0201D; &#x0201C;separate digital IM and verbal IM&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Temporal domain time,&#x0201D; &#x0201C;connectivity functional&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Attention,&#x0201D; &#x0201C;social cognition,&#x0201D; &#x0201C;executive function&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B85">Strang et al. 2014</xref>)</td>
<td valign="top" align="left">ECG</td>
<td valign="top" align="left"><italic>N</italic> = 80; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;mm&#x0201D;], unfamiliar</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">FTF</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Temporal domain time,&#x0201D; &#x0201C;connectivity functional&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;Executive function,&#x0201D; &#x0201C;visuospatial cognition&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B49">Le Bars et al. 2020</xref>)</td>
<td valign="top" align="left">EDA</td>
<td valign="top" align="left"><italic>N</italic> = 35; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;mm&#x0201D;], unfamiliar</td>
<td valign="top" align="left">Cooperation/competition tasks</td>
<td valign="top" align="left">Low</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">SBS v-b</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">Temporal domain time</td>
<td valign="top" align="left">[&#x0201C;Executive function,&#x0201D; &#x0201C;visuospatial cognition&#x0201D;]</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B62">Melendez-Calderon et al. 2015</xref>)</td>
<td valign="top" align="left">EMG</td>
<td valign="top" align="left"><italic>N</italic> = 10; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;mm&#x0201D;], unfamiliar</td>
<td valign="top" align="left">Coordination tasks</td>
<td valign="top" align="left">High</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">SBS v-b</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Other,&#x0201D; &#x0201C;temporal domain time&#x0201D;]</td>
<td valign="top" align="left">Motor</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B47">Konvalinka et al. 2023</xref>)</td>
<td valign="top" align="left">Other</td>
<td valign="top" align="left"><italic>N</italic> = 24; dyad, [&#x0201C;ff,&#x0201D; &#x0201C;fm&#x0201D;], unfamiliar</td>
<td valign="top" align="left">[&#x0201C;Coordination tasks,&#x0201D; &#x0201C;imitation tasks&#x0201D;]</td>
<td valign="top" align="left">[&#x0201C;High,&#x0201D; &#x0201C;low&#x0201D;]</td>
<td valign="top" align="left">Goal-driven</td>
<td valign="top" align="left">Digital</td>
<td valign="top" align="left">Virtual</td>
<td valign="top" align="left">Separate digital IM w/out verbal IM</td>
<td valign="top" align="left">[&#x0201C;Temporal domain frequency,&#x0201D; &#x0201C;connectivity functional,&#x0201D; &#x0201C;temporal domain time&#x0201D;]</td>
<td valign="top" align="left">Motor</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>Overview of all included sources and assigned category labels. BTB, back-to-back; ECG, electrocardiography; EDA, electrodermal activity; EEG, electroencephalography; ff, female-female pairing; fm, female-male or otherwise mixed-gender pairing; fNIRS, functional near-infrared spectroscopy; FTF v-b, face-to-face with visual barriers; FTF, face-to-face; IM, interaction medium; mm, male-male pairing; n/s, not specified; SBS v-b, side-by-side with visual barriers; SBS, side-by-side.</p>
</table-wrap-foot>
</table-wrap>
<fig position="float" id="F3">
<label>Figure 3</label>
<caption><p>Number of publications by year. The number of publications selected for this review, plotted by year. The earliest included studies were published in the mid-2010s (<xref ref-type="bibr" rid="B62">Melendez-Calderon et al., 2015</xref>; <xref ref-type="bibr" rid="B85">Strang et al., 2014</xref>), with a steady increase in the number of publications thereafter, peaking at <italic>n</italic> = 9 in 2023. Note that a noticeable dip in published studies occurred following the onset of the COVID-19 pandemic at the end of 2019.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnrgo-07-1756956-g0003.tif">
</graphic>
</fig>
</sec>
<sec>
<label>3.2</label>
<title>Study categorization</title>
<p>The following section summarizes the categorization of the selected studies according to the labels described in Section 2.4 and detailed in <xref ref-type="table" rid="T6">Table 6</xref>.</p>
<sec>
<label>3.2.1</label>
<title>Measurement modalities</title>
<p>Overall, 19 studies included EEG, 12 studies included fNIRS, and the remaining 14 studies included a type of physiological measurement or camera-based tracking of a physiological signal (see <xref ref-type="fig" rid="F4">Figure 4</xref>). Few papers (<italic>n</italic> = 7) reported results of multiple measurement modalities jointly, and even fewer (<italic>n</italic> = 5) reported a combination of physiological (body) and neurophysiological (brain) methods (EEG: <xref ref-type="bibr" rid="B5">Balconi et al., 2022</xref>; <xref ref-type="bibr" rid="B15">Chuang and Hsu, 2023</xref>; <xref ref-type="bibr" rid="B39">Gugnowska et al., 2022</xref>; <xref ref-type="bibr" rid="B91">Wang et al., 2024</xref>; fNIRS: <xref ref-type="bibr" rid="B81">Shih et al., 2024</xref>). With respect to other modalities, eye-tracking was employed in nine studies, but only two studies combined eye-tracking with EEG (<xref ref-type="bibr" rid="B15">Chuang and Hsu, 2023</xref>; <xref ref-type="bibr" rid="B91">Wang et al., 2024</xref>), and none reported the joint use of fNIRS and eye-tracking. A similarly limited pattern was observed for ECG and EDA: two studies employed ECG alone (<xref ref-type="bibr" rid="B30">Fang et al., 2022</xref>; <xref ref-type="bibr" rid="B85">Strang et al., 2014</xref>), while EDA was used as a standalone measure in only one study (<xref ref-type="bibr" rid="B49">Le Bars et al., 2020</xref>). ECG and EDA were combined with EEG only once (<xref ref-type="bibr" rid="B5">Balconi et al., 2022</xref>). Camera-based tracking was integrated with fNIRS in one instance (<xref ref-type="bibr" rid="B81">Shih et al., 2024</xref>) and with EEG in another (<xref ref-type="bibr" rid="B39">Gugnowska et al., 2022</xref>).</p>
<fig position="float" id="F4">
<label>Figure 4</label>
<caption><p>Category counts of measurement modalities. Measurement modalities were distributed across eight categories. The category &#x0201C;other&#x0201D; included two studies measuring respiration and one study measuring impedance cardiography.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnrgo-07-1756956-g0004.tif">
</graphic>
</fig>
</sec>
<sec>
<label>3.2.2</label>
<title>Participants</title>
<p>The majority of studies investigated inter-subject dynamics in a dyadic setting (<italic>n</italic> = 39) with an average sample size of 46.6 &#x000B1; 26.1 (SEM = 4.17, min: 4, max: 104), although three studies measured triads (<xref ref-type="bibr" rid="B27">Du et al., 2022</xref>; <xref ref-type="bibr" rid="B32">Findik-Co&#x0015F;kun&#x000E7;ay and &#x000C7;akir, 2022</xref>; <xref ref-type="bibr" rid="B82">Snijdewint and Scheepers, 2023</xref>), two studies measured groups of six (<xref ref-type="bibr" rid="B30">Fang et al., 2022</xref>; <xref ref-type="bibr" rid="B104">Zhang et al., 2023b</xref>), and one paper reported measuring eight individuals at once (<xref ref-type="bibr" rid="B4">Balconi et al., 2023</xref>).</p>
<p>Most studies reported that paired participants were unfamiliar with each other (<italic>n</italic> = 23), although many also used familiar pairings (<italic>n</italic> = 14). Ten studies did not state conclusively whether interacting participants knew each other. In three studies, the relationship between partners was of an instructor-student nature (<xref ref-type="bibr" rid="B5">Balconi et al., 2022</xref>, <xref ref-type="bibr" rid="B4">2023</xref>; <xref ref-type="bibr" rid="B54">Liu et al., 2019</xref>).</p>
<p>The majority of studies included measurements of same-sex pairs (31 studies), with one study including only female pairs (<xref ref-type="bibr" rid="B46">Hu et al., 2017</xref>) and three studies including only male pairs (<xref ref-type="bibr" rid="B1">Astolfi et al., 2020</xref>; <xref ref-type="bibr" rid="B17">Ciaramidaro et al., 2024</xref>; <xref ref-type="bibr" rid="B65">Numata et al., 2021</xref>). Fewer studies indicated a setup with different-sex pairs (<italic>n</italic> = 17), with four studies including only mixed-gender pairs (<xref ref-type="bibr" rid="B30">Fang et al., 2022</xref>; <xref ref-type="bibr" rid="B43">Hayne et al., 2023</xref>; <xref ref-type="bibr" rid="B70">Pan et al., 2017</xref>; &#x00160;pakov et al., <xref ref-type="bibr" rid="B84">2019</xref>). For 14 studies, the combination of pairs was not specified conclusively. Note that the number of participants who identified as &#x0201C;non-binary&#x0201D; or &#x0201C;other&#x0201D; was negligibly small; in these cases, the setup was counted as a different-sex setup (<xref ref-type="bibr" rid="B30">Fang et al., 2022</xref>; <xref ref-type="bibr" rid="B43">Hayne et al., 2023</xref>; &#x00160;pakov et al., <xref ref-type="bibr" rid="B84">2019</xref>).</p></sec>
<sec>
<label>3.2.3</label>
<title>Paradigm category</title>
<p>We adapted a paradigm categorization based on six previously defined archetypes (<xref ref-type="bibr" rid="B9">Barde et al., 2020</xref>; <xref ref-type="bibr" rid="B90">Wang et al., 2018</xref>). Most of the included studies employed cooperation/competition tasks (<italic>n</italic> = 22), while some paradigms were more representative of an ecologically valid setting (<italic>n</italic> = 16). Seven studies were identified as belonging to the category of coordination tasks. One study employed a cooperation/competition paradigm with an interaction mechanic relying on gaze itself (<xref ref-type="bibr" rid="B15">Chuang and Hsu, 2023</xref>). In another study, respiratory synchrony in a coordination task was compared to that in an imitation paradigm (<xref ref-type="bibr" rid="B47">Konvalinka et al., 2023</xref>). The latter study used a confederate who did not actually collaborate, yet it was included because participants believed they were engaging in a collaborative task involving respiration coordination. Most studies utilizing a form of economic exchange task were excluded because they lacked a collaborative component (i.e., they focused on competitive interactions); the exception was one study in which participants jointly took part in such a task to investigate collaborative decision making in competing groups (<xref ref-type="bibr" rid="B104">Zhang et al., 2023b</xref>).</p></sec>
<sec>
<label>3.2.4</label>
<title>Task symmetry</title>
<p>Thirty-three studies involved high task symmetry between participants, i.e., participants fulfilling similar roles, whereas 14 studies were classified as low-symmetry studies, i.e., they involved predefined, distinct roles. One study incorporated a range of tasks with varying symmetry (<xref ref-type="bibr" rid="B47">Konvalinka et al., 2023</xref>), and another involved the simultaneous measurement of multiple participants, some exhibiting high symmetry and others low symmetry toward each other (<xref ref-type="bibr" rid="B104">Zhang et al., 2023b</xref>).</p></sec>
<sec>
<label>3.2.5</label>
<title>Type of communication</title>
<p>To better understand the nature of interaction in hyperscanning studies, we examined the objective of communication. Goal-driven communication was assumed when an explicit goal, objective, or outcome was pursued in the task. Any task with no clear or explicit objective was categorized as open-ended. Tasks containing elements of both were defined as mixed communication. Thirty-three studies involved goal-driven communication (either verbal or non-verbal). Nine studies involved open-ended communication, all of which used an ecologically valid setting during the sessions. Four studies employed mixed communication (<xref ref-type="bibr" rid="B27">Du et al., 2022</xref>; <xref ref-type="bibr" rid="B30">Fang et al., 2022</xref>; <xref ref-type="bibr" rid="B48">K&#x000FC;tt et al., 2019</xref>; <xref ref-type="bibr" rid="B65">Numata et al., 2021</xref>). For instance, one study had participants play a game of <italic>Mafia</italic>, which includes an open-ended portion of communication in which participants could freely choose whether to speak and what to say (<xref ref-type="bibr" rid="B30">Fang et al., 2022</xref>).</p></sec>
<sec>
<label>3.2.6</label>
<title>Transfer of information</title>
<p>Before detailing the transfer of information based on the interaction medium and the interaction scenario, we broadly categorized whether information exchange between participants happened via analog, digital, or mixed channels (<xref ref-type="bibr" rid="B6">Balters et al., 2020</xref>). Since one of the study eligibility criteria was a focus on a digital aspect of collaboration, all included studies utilized some form of digital information transfer, and most of them (<italic>n</italic> = 34) examined exclusively digital communication. Approximately a fourth of all studies (<italic>n</italic> = 11) involved mixed information transfer via both digital and analog channels. One study included a between-subjects comparison of remote and in-person collaboration and was hence also categorized as containing an analog form of information transfer (<xref ref-type="bibr" rid="B8">Balters et al., 2023</xref>).</p></sec>
<sec>
<label>3.2.7</label>
<title>Interaction scenarios</title>
<p>Interaction scenarios refer to the physical or virtual setup of the participants relative to each other, e.g., facing each other, sitting side-by-side, or interacting remotely via screens or Virtual Reality setups. As expected based on the search criteria, most studies took place in a form of virtual scenario (<italic>n</italic> = 29). Some approximated a remote setting by placing a visual barrier between participants positioned side-by-side (<italic>n</italic> = 10) or face-to-face (<italic>n</italic> = 6), or by seating them back-to-back (<italic>n</italic> = 1). Some studies directly compared one of these scenarios to an in-person collaborative interaction or used a purely digital interaction medium, which led to the inclusion of face-to-face (<italic>n</italic> = 9) and side-by-side (<italic>n</italic> = 6) scenarios.</p></sec>
<sec>
<label>3.2.8</label>
<title>Interaction medium</title>
<p>Interaction medium refers to the kind of interaction participants experienced with each other and the type of medium used, e.g., whether participants were allowed to communicate verbally, interacted with physical objects, or interacted via a digital tool (see <xref ref-type="table" rid="T4">Table 4</xref>). Most studies utilized a separate means of digital interaction (e.g., two screens) without verbal exchanges (<italic>n</italic> = 24). A few had similar setups while allowing for verbal interactions (<italic>n</italic> = 13). Fewer studies utilized shared digital interaction media (without verbal: <italic>n</italic> = 4; with verbal: <italic>n</italic> = 3).</p></sec>
<sec>
<label>3.2.9</label>
<title>Comparison of the interaction medium and scenario</title>
<p>We also examined the interaction medium and the interaction scenario jointly to characterize the extent to which a study design was digital (<xref ref-type="bibr" rid="B7">Balters et al., 2021</xref>). For a differentiated analysis, we mapped studies along the dimensions of interaction medium, including the presence of verbal communication, interaction scenario, measurement modality, and cross-condition occurrences within the same study. Results are summarized in <xref ref-type="fig" rid="F5">Figure 5</xref> and <xref ref-type="supplementary-material" rid="SM1">Supplementary Table 2</xref>.</p>
<fig position="float" id="F5">
<label>Figure 5</label>
<caption><p>Distribution of hyperscanning conditions across modalities and interaction types. The cross-sectional distribution of all hyperscanning conditions of 45 studies across interaction medium and interaction scenario axes. The numbers in circles provide the counted occurrences (<italic>n</italic> = 75 conditions across modalities) of the cross-section of an interaction medium and scenario (<italic>n</italic> = 22 unique combinations, shown as circles). The colors represent the measurement modalities reported for each cross-section of conditions. The connection lines indicate reported cross-condition occurrences separated by axis (<italic>n</italic> = 16 simultaneous condition occurrences). Studies involving a digital component, either through a digital medium or a virtual interaction scenario, are marked through a gray shaded area. Note that conditions that do not fall within this gray area are part of a comparison to a condition with a digital component. ECG, electrocardiography; EDA, electrodermal activity; EEG, electroencephalography; fNIRS, functional near-infrared spectroscopy.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnrgo-07-1756956-g0005.tif">
</graphic>
</fig>
<p>Across 45 multimodal hyperscanning studies, a total of 75 experimental conditions were reported, consisting of 22 unique combinations of interaction medium and interaction scenario (see <xref ref-type="fig" rid="F5">Figure 5</xref>). Moreover, 16 co-occurrences of two or more conditions comparing medium and/or scenario were found, with eight comparisons of a digital with a non-digital interaction setup (for details see <xref ref-type="supplementary-material" rid="SM1">Supplementary Table 2</xref>). The number of studies including non-verbal communication (<italic>n</italic> = 26) was slightly higher than the number including verbal communication (<italic>n</italic> = 19). Notably, most studies were designed with a virtual divide or environment and separate interaction media, independent of the inclusion of verbal communication.</p>
<p>Some modality-specific patterns became evident when examining the cross-sectional distribution of interaction medium and scenario variations (for details, see <xref ref-type="supplementary-material" rid="SM1">Supplementary Table 2</xref>). fNIRS studies included more occurrences of various interaction conditions within the same study, including several that contrasted physical, hybrid, and fully digital interactions, as well as the greatest relative number of unique medium-scenario combinations (<italic>n</italic> = 10 across 12 included studies), with six cross-condition occurrences within the same studies (for details, see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 2</xref>). EEG studies also included a high number of unique medium-scenario combinations (<italic>n</italic> = 14 across 19 included studies); however, only six studies included more than one condition at once (for details, see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 3</xref>). Eye-tracking studies were confined to digital scenarios, resulting in fewer unique combinations and no direct digital-analog comparisons (for details, see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 4</xref>). Together, these patterns highlight substantial methodological variation across modalities in how interaction settings are operationalized and compared.</p></sec>
<sec>
<label>3.2.10</label>
<title>Analysis approaches</title>
<p>The most common analysis method involved measuring functional connectivity (<italic>n</italic> = 29), with some studies also including effective connectivity (<italic>n</italic> = 4). Of these, two used Granger causality (<xref ref-type="bibr" rid="B14">Cheng et al., 2019</xref>; <xref ref-type="bibr" rid="B70">Pan et al., 2017</xref>) and two computed partial directed coherence (<xref ref-type="bibr" rid="B1">Astolfi et al., 2020</xref>; <xref ref-type="bibr" rid="B17">Ciaramidaro et al., 2024</xref>). The second most prevalent signal analysis approach focused on the temporal domain, examining time (<italic>n</italic> = 17) or frequency (<italic>n</italic> = 20) aspects. A few studies included analyses in the spatial domain, be it in sensor space (<italic>n</italic> = 14) or source space (<italic>n</italic> = 8). Two EEG studies (<xref ref-type="bibr" rid="B1">Astolfi et al., 2020</xref>; <xref ref-type="bibr" rid="B108">Zhou et al., 2021</xref>) and two fNIRS studies (<xref ref-type="bibr" rid="B43">Hayne et al., 2023</xref>; <xref ref-type="bibr" rid="B103">Zhang et al., 2023a</xref>) employed a form of supervised machine learning. One study employed a support vector machine (SVM) to differentiate between cooperative, solo, and competitive gaming based on hemodynamic features from executive and motor regions (<xref ref-type="bibr" rid="B43">Hayne et al., 2023</xref>). Notably, they determined within-subject features during the cooperative interaction rather than features based on dyadic indices. Another fNIRS study predicted mnemonic similarity based on the interpersonal neural synchronization during a collaborative remembering task (<xref ref-type="bibr" rid="B103">Zhang et al., 2023a</xref>). In another study, social and non-social conditions were distinguished with an SVM using features based on graph indices derived from partial directed coherence values (<xref ref-type="bibr" rid="B1">Astolfi et al., 2020</xref>). Zhou et al. (<xref ref-type="bibr" rid="B108">2021</xref>) used logistic regression to demonstrate that the conditional manipulation of dyads could be predicted by using cumulative inter-brain synchrony from significant electrode pairs as classification features. Only one fNIRS study reported the use of unsupervised machine learning to investigate interactions, namely a k-means clustering approach to identify dynamic interbrain coherence states along with their corresponding occurrence rates across time during an online conversation task (see <xref ref-type="fig" rid="F6">Figure 6</xref>; <xref ref-type="bibr" rid="B8">Balters et al., 2023</xref>). Other analysis methods included a form of graph theory (<xref ref-type="bibr" rid="B1">Astolfi et al., 2020</xref>), two reports on the amount of gaze sharing (<xref ref-type="bibr" rid="B48">K&#x000FC;tt et al., 2019</xref>; <xref ref-type="bibr" rid="B95">Wisiecka et al., 2023</xref>), another three focusing on gaze pattern analysis (<xref ref-type="bibr" rid="B13">Cheng et al., 2022</xref>; <xref ref-type="bibr" rid="B74">P&#x000F6;ys&#x000E4;-Tarhonen et al., 2021</xref>; &#x00160;pakov et al., <xref ref-type="bibr" rid="B84">2019</xref>), and one study analyzing co-contraction of two muscles in the right forearm measured via EMG (<xref ref-type="bibr" rid="B62">Melendez-Calderon et al., 2015</xref>).</p>
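<p>To make the supervised machine learning approaches summarized above more concrete, the following minimal sketch (in Python with NumPy, SciPy, and scikit-learn; simulated data, not the pipeline of any reviewed study) computes a simple inter-brain synchrony feature, the phase-locking value per channel pair, and feeds such features to a linear support vector machine, analogous in spirit to classifying experimental conditions from inter-brain synchrony.</p>
<code language="python">
# Minimal illustrative sketch (simulated data; not the pipeline of any reviewed study):
# inter-brain phase-locking values per channel pair as features for a linear SVM.
import numpy as np
from scipy.signal import hilbert
from sklearn.svm import SVC

def plv(x, y):
    """Phase-locking value between two equally long signals."""
    phase_diff = np.angle(hilbert(x)) - np.angle(hilbert(y))
    return np.abs(np.mean(np.exp(1j * phase_diff)))

def dyad_features(eeg_a, eeg_b):
    """One PLV feature per inter-brain channel pair (channels x samples arrays)."""
    return np.array([plv(ca, cb) for ca in eeg_a for cb in eeg_b])

# Hypothetical data: 20 trials, 4 channels per participant, 256 samples per trial.
rng = np.random.default_rng(0)
X = np.stack([dyad_features(rng.standard_normal((4, 256)),
                            rng.standard_normal((4, 256))) for _ in range(20)])
y = np.repeat([0, 1], 10)  # e.g., cooperative vs. competitive condition labels

clf = SVC(kernel="linear").fit(X, y)
print(clf.score(X, y))  # training accuracy only, shown for illustration
</code>
<p>Real analyses would additionally band-pass filter and artifact-correct the signals and evaluate classifiers with cross-validation rather than training accuracy.</p>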
<fig position="float" id="F6">
<label>Figure 6</label>
<caption><p>Category counts of analysis methods. Analysis methods were distributed across eight categories. The category &#x0201C;other&#x0201D; included two studies assessing the amount of gaze-sharing, two studies analyzing gaze patterns, and one study measuring muscle co-contraction.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnrgo-07-1756956-g0006.tif">
</graphic>
</fig>
</sec>
<sec>
<label>3.2.11</label>
<title>Cognitive functions of interest</title>
<p>Categorization of the cognitive function of interest was not clear-cut because many tasks required the engagement of multiple cognitive functions. Based on explicitly stated research objectives (e.g., <xref ref-type="bibr" rid="B103">Zhang et al., 2023a</xref>) or the described tasks (e.g., a visual search task; <xref ref-type="bibr" rid="B88">Szymanski et al., 2017</xref>), the main functions of interest were categorized (see <xref ref-type="fig" rid="F7">Figure 7</xref>). Perhaps unsurprisingly, given the focus on collaborative interactions, most studies focused on executive functioning (<italic>n</italic> = 22) or social cognition (<italic>n</italic> = 19). Various studies addressed visuospatial cognition (<italic>n</italic> = 12) and attention (<italic>n</italic> = 12) during collaborative scenarios. Some studies focused, by design, more strongly on motor functions (<italic>n</italic> = 10) or language (<italic>n</italic> = 7). Few studies mainly investigated memory-related processes (<italic>n</italic> = 5) or other cognitive functions (<italic>n</italic> = 3). The latter included two studies that emphasized creative design (<xref ref-type="bibr" rid="B81">Shih et al., 2024</xref>; <xref ref-type="bibr" rid="B97">Wu et al., 2025</xref>) and one focusing on joint musical ability during remote piano playing (<xref ref-type="bibr" rid="B39">Gugnowska et al., 2022</xref>).</p>
<fig position="float" id="F7">
<label>Figure 7</label>
<caption><p>Category counts of cognitive functions. Cognitive functions were distributed across eight categories. The category &#x0201C;other&#x0201D; included two studies focusing on joint designing ability and one study assessing joint musical ability.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnrgo-07-1756956-g0007.tif">
</graphic>
</fig>
</sec>
<sec>
<label>3.2.12</label>
<title>Relating modality, analysis, and cognitive function</title>
<p>To understand how measurement modality aligns with analysis and cognitive function under study, we plotted their relation in <xref ref-type="fig" rid="F8">Figures 8</xref>, <xref ref-type="fig" rid="F9">9</xref>. These comparisons highlight clear patterns in analytic preferences as well as notable methodological gaps.</p>
<fig position="float" id="F8">
<label>Figure 8</label>
<caption><p>Heatmap of measurement modalities and analysis methods. Heatmap of studies across the categorical dimensions analysis method (rows) and measurement modality (columns). Cell values show raw co-occurrence counts between the row and column categories. Cell color intensity increases with the value in each cell; darker cells indicate higher numbers. camera, camera-based tracking; ECG, electrocardiography; EDA, electrodermal activity; EEG, electroencephalography; EMG, electromyography; eye, eye-tracking; fNIRS, functional near-infrared spectroscopy.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnrgo-07-1756956-g0008.tif">
</graphic>
</fig>
<fig position="float" id="F9">
<label>Figure 9</label>
<caption><p>Alluvial plot of measurement modality, analysis method, and cognitive function. Alluvial diagram illustrating category flows across the categories measurement modality (left), analysis method (middle), and cognitive function (right) shown as vertical columns of nodes. Node height is proportional to the number of outgoing links summed across all destinations, and link thickness reflects transitions between adjacent categories. Colors indicate measurement modality. Note that some nodes in the diagram originate from one measurement modality but are colored according to another modality to represent studies in which multiple measurement modalities were used simultaneously.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnrgo-07-1756956-g0009.tif">
</graphic>
</fig>
<p>Overall, fNIRS emerged as the most versatile modality regarding analysis, appearing across nearly all analysis categories. It also accounted for the only instance of unsupervised machine learning among the included studies. While EEG was the most frequently used modality, EEG studies showed a more selective analysis profile, with analyses predominantly situated in functional connectivity and the frequency domain. Eye-tracking studies were distributed in nearly equal proportions across time-domain, functional connectivity, and other forms of analyses. Peripheral physiological signals (e.g., ECG, EDA, EMG) were primarily analyzed in the time domain and through functional connectivity approaches, reflecting their use in capturing autonomic or muscular synchrony. Camera-based tracking appeared only sparsely across several categories. Across modalities, several analytic approaches appeared markedly underutilized. Effective connectivity was rare, with only a handful of studies applying it despite its relevance for examining directional influences between interacting partners (<xref ref-type="bibr" rid="B1">Astolfi et al., 2020</xref>; <xref ref-type="bibr" rid="B14">Cheng et al., 2019</xref>; <xref ref-type="bibr" rid="B17">Ciaramidaro et al., 2024</xref>; <xref ref-type="bibr" rid="B70">Pan et al., 2017</xref>). Machine learning approaches were equally rare (<xref ref-type="bibr" rid="B1">Astolfi et al., 2020</xref>; <xref ref-type="bibr" rid="B8">Balters et al., 2023</xref>; <xref ref-type="bibr" rid="B43">Hayne et al., 2023</xref>; <xref ref-type="bibr" rid="B106">Zhang et al., 2023c</xref>; <xref ref-type="bibr" rid="B108">Zhou et al., 2021</xref>), with only one study employing an unsupervised approach (<xref ref-type="bibr" rid="B8">Balters et al., 2023</xref>). Both effective connectivity and machine learning methods were limited to brain-based modalities. Together, these patterns indicate a strong reliance on functional connectivity and temporal analyses, with substantial methodological opportunities for future work.</p>
<p>Across modalities and analytic approaches, executive function and social cognition consistently emerged as the most frequently investigated cognitive domains, forming the largest group of targeted cognitive functions. Executive function was associated with the largest body of studies overall, particularly from EEG-based time-domain, frequency-domain, and functional connectivity analyses, as well as from fNIRS connectivity approaches. Social cognition exhibited an equally robust presence, especially in fNIRS studies employing functional connectivity methods as well as in eye-tracking studies. In contrast, domains such as memory, language, and motor cognition were addressed in fewer studies. Visuospatial cognition occupied a middle ground: although not as prominent as executive or social cognition, it showed a clear association with EEG-based temporal and spatial analyses, indicating a more specialized but stable niche. Interestingly, comparatively few studies involving eye-tracking focused on visuospatial cognition (for details, see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 5</xref>).</p>
</sec>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<label>4</label>
<title>Discussion</title>
<p>We systematically collected and analyzed studies on hyperscanning that explored collaboration in digital contexts. The categorization revealed that most studies used either EEG or fNIRS as measurement tools, with only seven out of 45 employing multimodal measurement approaches. Among these, most combined EEG with either eye-tracking or ECG, indicating a notable underuse of multimodal measurement techniques. Most studies focused on same-sex dyads, with roughly two-thirds of participants unfamiliar with each other. The studies mainly involved cooperation or competition tasks or aimed at modeling an ecologically valid setting. Goal-driven tasks with symmetrical roles between participants were most common for examining collaboration in digital environments and media. Interestingly, most studies limited information transfer to digital formats such as screen-based exchanges, with fewer studies utilizing both analog and digital transfer methods. FNIRS studies frequently examined different interaction scenarios or media across various conditions, while EEG studies featured a broader variety of unique interaction settings. Regardless of interaction setup, non-verbal communication was investigated somewhat more often than verbal communication, although the difference was small. Analytical approaches were mostly constrained to functional connectivity, with limited application of effective connectivity. Furthermore, very few studies applied machine learning, highlighting a methodological gap given the high-dimensional, complex data involved. Research has primarily concentrated on executive and social functions during digital collaboration, while fewer studies have addressed creativity, memory, or language explicitly. Our thorough review of the existing literature indicates a solid foundation of research, although its scope can be extended. To better understand the complex dynamics of digital collaboration, it is essential to expand both the methodological framework and the conceptual basis of hyperscanning research.</p>
<p>Before discussing the most important aspects and main takeaways in detail, we summarize the central conclusions here. First, we recommend a set of reporting guidelines for hyperscanning studies to increase transparency, reproducibility, and meaningful comparison across the literature, including a checklist to help authors document essential methodological information. Second, we emphasize the contribution of the present review and the associated online database in organizing hyperscanning research into standardized categories. Third, based on the diversity of study designs identified, we recommend conducting systematic empirical comparisons of differently structured interaction scenarios to support the development of theories and the application of findings to real-world settings. Fourth, our results show that the unimodal use of measurement modalities currently dominates the hyperscanning research on digital collaboration. However, we identify considerable potential in integrating multiple modalities to construct multidimensional models of the neurophysiological signatures of collaboration. Such approaches could provide complementary perspectives on collaborative cognition and how digital components influence associated cognitive and social processes. Fifth, the present review also suggests that the choice of measurement modality is strongly associated with not only the analytic approach but also the specific cognitive processes investigated. We therefore argue that multivariate and multimodal approaches will be necessary to disentangle the involvement of the many cognitive processes involved in collaborative behavior, and to capture fine-grained effects associated with different degrees of digital mediation. Finally, we encourage readers and researchers to increase the value of this work by contributing additional sources to the online database, thereby supporting the continued comprehensiveness of the InterBrainDB on digital collaboration.</p>
<sec>
<label>4.1</label>
<title>Structuring principles: guidelines for reporting hyperscanning results</title>
<p>To promote transparency, reproducibility, and meaningful comparison across hyperscanning studies, we strongly encourage authors to report specific methodological details that directly impact the interpretation of inter-brain connectivity and synchronization. Based on our review of the literature, we outlined a set of minimal reporting guidelines that, if followed, would greatly improve the interpretability and meta-analytic usefulness of future research (see <xref ref-type="table" rid="T7">Table 7</xref>).</p>
<table-wrap position="float" id="T7">
<label>Table 7</label>
<caption><p>Guidelines for reporting hyperscanning findings.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Category</bold></th>
<th valign="top" align="left"><bold>Checklist item</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Sample size</td>
<td valign="top" align="left">Number of participants or pairings included in analyses (i.e., not necessarily how many were originally recruited)</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="6">Participant metadata</td>
<td valign="top" align="left">Age of participants</td>
</tr>
 <tr>
<td/>
<td valign="top" align="left">Gender distribution within sample</td>
</tr>
 <tr>
<td/>
<td valign="top" align="left">Familiarity or relationship between participants (e.g., friends, parent-child)</td>
</tr>
 <tr>
<td/>
<td valign="top" align="left">Pairing characteristics (e.g., dyads, triads)</td>
</tr>
 <tr>
<td/>
<td valign="top" align="left">Gender pairing setup (e.g., same-sex pairs)</td>
</tr>
 <tr>
<td/>
<td valign="top" align="left">Relevant participant traits (e.g., handedness, prior experience)</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="2">Measurement modality</td>
<td valign="top" align="left">Type and technical details of measurements (e.g., EEG, number of electrodes)</td>
</tr>
 <tr>
<td/>
<td valign="top" align="left">Use and joint analysis of multiple modalities (e.g., EEG and eye-tracking)</td>
</tr>
<tr>
<td valign="top" align="left">Recording configuration</td>
<td valign="top" align="left">Timing of neural recordings (e.g., simultaneous, post hoc sync)</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="2">Communication parameters</td>
<td valign="top" align="left">Allowed communication types (e.g., verbal, gesture)</td>
</tr>
 <tr>
<td/>
<td valign="top" align="left">Form of interaction (e.g., open-ended conversation, face visibility)</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="4">Task design and structure</td>
<td valign="top" align="left">Spatial setup (e.g., face-to-face, side-by-side) with diagram if possible</td>
</tr>
 <tr>
<td/>
<td valign="top" align="left">Interaction media (e.g., shared screen, physical objects)</td>
</tr>
 <tr>
<td/>
<td valign="top" align="left">Task symmetry (e.g., identical vs. distinct roles)</td>
</tr>
 <tr>
<td/>
<td valign="top" align="left">Task paradigm (e.g., cooperation, imitation, economic exchange)</td>
</tr>
<tr>
<td valign="top" align="left">Analysis</td>
<td valign="top" align="left">Level of analysis (e.g., intra-brain, inter-brain) with clear separation in the methods section</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>This checklist summarizes recommended reporting practices for hyperscanning studies to improve transparency, reproducibility, and comparability across experiments. Items are grouped by major methodological domains, including participant characteristics, measurement modalities, recording configuration, communication parameters, task design, and analytic levels. Examples in parentheses illustrate typical reporting details but are not exhaustive.</p>
</table-wrap-foot>
</table-wrap>
<p>Firstly, researchers should thoroughly report participant metadata. This includes age, gender, familiarity among participants, and any pairing characteristics that might systematically influence neural coupling or social behavior (e.g., handedness, prior experiences). Studies that do not disclose this information limit the ability to meaningfully contextualize, compare, or replicate their results.</p>
<p>Secondly, it is crucial to specify the temporal setup of the neural recordings, i.e., whether data were recorded simultaneously across participants or whether signal streams were aligned afterward. Although both methods are valid, they have different implications for interpreting time-locked neural coupling and inter-brain dynamics. On the one hand, simultaneous recordings offer higher ecological validity for social components compared to single-person recordings; on the other hand, single-person recordings provide greater experimental control and easier replication that is less dependent on the sample (<xref ref-type="bibr" rid="B29">Fan et al., 2021</xref>). The included studies used simultaneous acquisition (as part of the <italic>comparison</italic> inclusion criterion). However, during full-text screening, a notable subset (<italic>n</italic> = 46) was excluded because these studies relied on separate acquisitions with later alignment, even though the abstract screening had suggested a form of real-time social interaction.</p>
<p>Thirdly, authors should clearly state whether verbal communication was allowed between participants and, if so, describe the nature and structure of the dialogue. Was communication entirely open-ended, limited to specific prompts, or completely prohibited? The level of verbal interaction fundamentally alters the cognitive and emotional demands of the task (<xref ref-type="bibr" rid="B56">Lu et al., 2020</xref>). Apart from affecting social, cognitive, emotional, and sensory processes, it can also introduce signal noise through muscle activity. Studies should specify whether verbal and/or non-verbal communication was possible during the experiment, as both can influence interpersonal neural synchrony in collaborative settings (<xref ref-type="bibr" rid="B81">Shih et al., 2024</xref>).</p>
<p>Fourthly, it is essential to describe the visual and spatial arrangement of the participants during the experiment to help readers better understand the sensorimotor and perceptual context of the interaction. Specifically, were participants able to see each other&#x00027;s facial expressions, gestures, and body movements? How were they positioned relative to each other (e.g., face-to-face, side-by-side, separated by a screen)? We recommend including a schematic diagram or photo of the experimental setup whenever possible.</p>
<p>Fifthly, we recommend clearly stating whether analyses were conducted at an intra- or inter-individual level or both. To ensure a structured and clear understanding of the methods, we suggest dividing the description of methods accordingly. For studies involving multimodal data collection, we recommend separating the description of analysis methods by measurement modality and clearly specifying how the modalities were combined and analyzed together.</p>
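<p>One lightweight way to operationalize this checklist, offered purely as an illustration rather than a formal standard, is to store the items of <xref ref-type="table" rid="T7">Table 7</xref> as a structured, machine-readable record that accompanies the shared data. The following Python sketch uses hypothetical field names and values.</p>
<code language="Python">
# Illustrative sketch: a machine-readable study record covering the minimal
# reporting items of Table 7. Field names and values are hypothetical and
# intended only to show how the checklist could be documented alongside a
# dataset; this is not a formal or endorsed metadata standard.
study_record = {
    "sample_size": {"dyads_recruited": 32, "dyads_analyzed": 28},
    "participant_metadata": {
        "age_mean_sd": (24.1, 3.2),
        "gender_distribution": {"female": 30, "male": 26},
        "familiarity": "previously unacquainted",
        "pairing": "dyads",
        "gender_pairing": "same-sex pairs",
        "relevant_traits": ["right-handed", "no prior task experience"],
    },
    "measurement_modality": {
        "modalities": ["EEG (32 electrodes)", "eye-tracking"],
        "joint_analysis": True,
    },
    "recording_configuration": "simultaneous acquisition, shared clock",
    "communication_parameters": {
        "verbal": "restricted to task-related prompts",
        "non_verbal": "faces not visible",
    },
    "task_design": {
        "spatial_setup": "separate rooms, shared screen",
        "interaction_media": ["shared digital whiteboard"],
        "task_symmetry": "identical roles",
        "paradigm": "cooperation",
    },
    "analysis": {"levels": ["intra-brain", "inter-brain"]},
}

# Such a record could be exported (e.g., as JSON) and shared with the data.
</code>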
<p>By following these reporting practices, researchers can ensure their hyperscanning studies are both understandable on their own and useful to the larger research community. Future meta-analyses, database-driven reviews, and cumulative science efforts will depend heavily on the consistency and completeness of this essential information.</p>
</sec>
<sec>
<label>4.2</label>
<title>Study designs for measuring collaboration in the digital age</title>
<p>The wide variety of how factors relevant for digital collaboration are implemented across hyperscanning studies, whether through virtual interfaces, perceptual barriers, or hybrid setups, illustrates the diversity and adaptability of current research methods. A key contribution of this review is the systematic organization of interaction scenarios and media, offering a clear overview of how researchers define digital collaboration. This categorization is an important initial step toward combining findings on remote collaboration across different modalities. Building on this foundation, future research will benefit from designs that directly compare multiple measurement approaches during various interaction phases, as such methods can help develop comprehensive theoretical models and support applying hyperscanning insights to real-world digital environments (<xref ref-type="bibr" rid="B79">Schneider et al., 2021</xref>).</p>
</sec>
<sec>
<label>4.3</label>
<title>Modality: from unimodal to multimodal hyperscanning in digital collaboration</title>
<p>Across the reviewed literature, distinct (neuro-)physiological modalities were associated with specific aspects of collaborative cognition. This pattern could reflect both methodological affordances and historical trends in the field of hyperscanning. FNIRS studies more often targeted social cognition and language processes than EEG studies did, leveraging spatial specificity and relative robustness to motion artifacts to probe regions such as the inferior frontal gyrus or temporoparietal junction (<xref ref-type="bibr" rid="B21">Czeszumski et al., 2022</xref>). EEG, by contrast, was predominantly used to study executive functions and visuospatial cognition, exploiting its millisecond-level temporal resolution and sensitivity to oscillatory synchronization across distributed networks. Eye-tracking hyperscanning was used less frequently and primarily to investigate attentional coordination between collaborators, consistent with gaze as a behavioral marker of visual attention (<xref ref-type="bibr" rid="B31">Ferencov&#x000E1; et al., 2021</xref>). Yet relatively few eye-tracking studies explicitly focused on visuospatial collaborative processes, despite shared digital workspaces being central to many contemporary platforms (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure 5</xref>). This represents a missed opportunity to characterize how gaze alignment supports joint reference, turn-taking, and spatial negotiation in digital environments.</p>
<p>Multimodal approaches remain rare. The most common pairing combined EEG with eye-tracking or ECG, the latter likely reflecting existing EEG measurement infrastructures. Notably, we did not identify any hyperscanning studies that integrated EEG and fNIRS in digital interaction contexts, despite strong theoretical motivation. Such integration could jointly capture the spatial localization and temporal dynamics of inter-brain coupling (for review see <xref ref-type="bibr" rid="B52">Li et al., 2022</xref>), offering a more complete description of collaborative processes, especially in more applied, naturalistic contexts (<xref ref-type="bibr" rid="B73">Pinto-Orellana et al., 2024</xref>). The infrequent use of multimodal approaches in hyperscanning studies highlights an important area for future research, with the potential to significantly advance methodological rigor and ecological validity in this field.</p>
<p>Multimodal acquisition is not trivial: hardware integration, increased setup complexity and cost, paradigm design that accommodates diverse temporal and spatial constraints, and a lack of standardized pipelines for joint analysis can be challenging (<xref ref-type="bibr" rid="B37">Gado et al., 2023</xref>; <xref ref-type="bibr" rid="B52">Li et al., 2022</xref>; <xref ref-type="bibr" rid="B73">Pinto-Orellana et al., 2024</xref>). Nevertheless, multimodal acquisition can provide a uniquely holistic view of how neural, physiological, and behavioral dynamics systematically interact during collaborative interaction (<xref ref-type="bibr" rid="B3">Balconi and Angioletti, 2023</xref>; <xref ref-type="bibr" rid="B37">Gado et al., 2023</xref>; <xref ref-type="bibr" rid="B52">Li et al., 2022</xref>; <xref ref-type="bibr" rid="B59">L&#x000FC;hmann et al., 2020</xref>). Work outside collaborative digital contexts already illustrates this potential (<xref ref-type="bibr" rid="B3">Balconi and Angioletti, 2023</xref>; <xref ref-type="bibr" rid="B107">Zhao et al., 2023</xref>). For example, joint EEG-fNIRS acquisition during a motor synchronization task yielded complementary inter-brain coherence indices across modalities (<xref ref-type="bibr" rid="B3">Balconi and Angioletti, 2023</xref>). Hyperscanning in digital collaboration could benefit from adopting symmetric, unsupervised EEG-fNIRS fusion pipelines (for review see <xref ref-type="bibr" rid="B18">Codina et al., 2025</xref>) to capture hidden inter-brain coupling beyond standard functional connectivity metrics.</p>
<p>These examples highlight how multimodal setups can shift interpretation from isolated signals to integrated multidimensional models of collaboration. The scarcity of such approaches in digital collaboration studies indicates substantial untapped potential, especially as hardware synchronization and computational fusion methods advance (<xref ref-type="bibr" rid="B18">Codina et al., 2025</xref>; <xref ref-type="bibr" rid="B26">Dissanayake et al., 2025</xref>). This is particularly critical in digital contexts, where key interaction channels (e.g., eye contact, gestures, spatial proximity) are filtered, delayed, or absent. Different modalities exhibit varying degrees of sensitivity to diverse stimuli (<xref ref-type="bibr" rid="B87">Stuldreher et al., 2020</xref>), making multimodal metrics the most robust choice for studying complex, dynamic, and digitally mediated forms of collaboration.</p>
<p>In sum, future work should move beyond simple one-to-one modality-process mapping. In digitally mediated environments, where cognitive, perceptual, and communicative cues are systematically altered, each modality provides a complementary perspective on collaborative cognition. Designing studies that leverage these strengths in combination, while acknowledging their limitations, will be crucial for advancing the cognitive neuroscience of digital interaction.</p>
</sec>
<sec>
<label>4.4</label>
<title>Analysis approaches and cognitive targets: how methods shape what we can see</title>
<p>Functional connectivity analyses (e.g., IBS measures) remain dominant as an analytical framework across hyperscanning modalities. This prevalence may reflect both conceptual accessibility and methodological convenience, especially in unimodal setups. Temporal-domain analyses were also common, especially in EEG, ECG, EDA, and eye-tracking, where high temporal resolution lends itself naturally to event-locked and synchrony-based metrics capturing the coordination and attention aspects of collaboration. FNIRS studies, on the other hand, frequently employed spatial-domain analyses to leverage the modality&#x00027;s superior cortical specificity. These modality-aligned analytical preferences collectively restrict the kinds of collaborative processes that can be detected and meaningfully interpreted.</p>
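<p>For readers less familiar with these metrics, the following minimal sketch shows one common functional connectivity (IBS) measure, the phase-locking value, computed between a band-filtered channel from each of two participants. It is a generic illustration in Python with synthetic signals and hypothetical filter settings; published studies use a variety of related metrics (e.g., wavelet transform coherence or circular correlation) rather than this exact implementation.</p>
<code language="Python">
# Illustrative sketch of a simple inter-brain synchrony (IBS) metric:
# the phase-locking value (PLV) between one channel from each participant.
# Signals, sampling rate, and band limits are hypothetical placeholders.
import numpy as np
from scipy.signal import butter, filtfilt, hilbert

fs = 250.0                      # sampling rate in Hz (hypothetical)
t = np.arange(0, 60, 1 / fs)    # 60 s of synthetic data
rng = np.random.default_rng(1)
eeg_p1 = np.sin(2 * np.pi * 10 * t) + 0.5 * rng.standard_normal(t.size)
eeg_p2 = np.sin(2 * np.pi * 10 * t + 0.3) + 0.5 * rng.standard_normal(t.size)

def bandpass(x, low, high, fs, order=4):
    """Zero-phase band-pass filter (here: an alpha-band example)."""
    b, a = butter(order, [low / (fs / 2), high / (fs / 2)], btype="band")
    return filtfilt(b, a, x)

def plv(x, y):
    """Phase-locking value between two equally long signals."""
    phase_x = np.angle(hilbert(x))
    phase_y = np.angle(hilbert(y))
    return np.abs(np.mean(np.exp(1j * (phase_x - phase_y))))

alpha_p1 = bandpass(eeg_p1, 8.0, 12.0, fs)
alpha_p2 = bandpass(eeg_p2, 8.0, 12.0, fs)
print(f"alpha-band inter-brain PLV: {plv(alpha_p1, alpha_p2):.3f}")
</code>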
<p>Interestingly, only four out of 45 studies incorporated measures of effective connectivity. A possible explanation could be that effective connectivity measures require explicit biophysical or statistical generative models and thus impose stricter assumptions on the data. Violations of these assumptions can produce misleading causal inferences (<xref ref-type="bibr" rid="B35">Friston, 2011</xref>). This makes effective connectivity measures harder to interpret and less reliable in dynamic social contexts with shared stimuli and mutual influence. Functional connectivity, on the other hand, is more robust and intuitive for the naturalistic data that increasingly characterize hyperscanning studies.</p>
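<p>To make the contrast concrete, the following sketch illustrates a simple effective connectivity test, bivariate Granger causality between two participants&#x00027; signals, using the statsmodels implementation. The data are synthetic and the lag choice is arbitrary; the example is meant only to show the kind of model assumptions (stationarity, a linear autoregressive structure, a selected lag order) that such analyses impose.</p>
<code language="Python">
# Illustrative sketch of a simple effective-connectivity test:
# bivariate Granger causality between two participants' time series.
# Note the assumptions this imposes (stationarity, a linear VAR model,
# a chosen maximum lag); violations can yield misleading inferences.
import numpy as np
from statsmodels.tsa.stattools import grangercausalitytests

rng = np.random.default_rng(2)
n = 500
signal_p2 = rng.standard_normal(n)
# Hypothetical coupling: participant 1 partly follows participant 2 with a lag.
signal_p1 = 0.6 * np.roll(signal_p2, 3) + rng.standard_normal(n)

# Column order matters: the test asks whether the SECOND column helps
# predict the FIRST column beyond the first column's own past.
data = np.column_stack([signal_p1, signal_p2])
results = grangercausalitytests(data, maxlag=5, verbose=False)

for lag, (tests, _) in results.items():
    f_stat, p_value, _, _ = tests["ssr_ftest"]
    print(f"lag {lag}: F = {f_stat:.2f}, p = {p_value:.4f}")
</code>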
<p>These methodological constraints have direct implications for the cognitive functions that become tractable targets of investigation. Although many collaborative tasks inherently engage multiple cognitive processes, a consistent pattern emerged: executive functions and social cognition are the primary targets of interest. This emphasis is expected, given their role in coordinating shared goals, managing joint attention, regulating turn-taking, and negotiating decisions during collaboration. More fine-grained characterization of the cognitive dynamics underlying collaboration will likely require multivariate and multimodal methods capable of disentangling overlapping components within complex tasks. Combining complementary modalities with data-driven methods may clarify how specific subprocesses, for example, shared attention, perspective taking, and goal monitoring, unfold over time and how they jointly support digitally mediated collaboration. Such fine-grained distinctions also hold potential for providing specific feedback on interaction processes that may be diminished or lost when interaction shifts from analog to digital (<xref ref-type="bibr" rid="B15">Chuang and Hsu, 2023</xref>; <xref ref-type="bibr" rid="B74">P&#x000F6;ys&#x000E4;-Tarhonen et al., 2021</xref>).</p>
<p>Given the limitations of conventional analyses for capturing these complex dynamics, machine learning represents an underutilized but promising avenue for advancing hyperscanning research. One reason for the underuse of machine learning probably lies in the field&#x00027;s early developmental stage: many research groups prioritize traditional statistical approaches to establish basic mechanisms of joint action before introducing more complex computational models that are difficult to interpret. Yet, the promise of machine learning goes beyond classification. Its strength lies in modeling non-linear, high-dimensional, and temporally evolving dependencies across multiple interacting processes. For instance, combining fNIRS and EEG features in intra-brain contexts has been shown to enable classification of cognitive workload via bivariate connectivity metrics, an approach readily adaptable to hyperscanning data (<xref ref-type="bibr" rid="B11">Cao et al., 2022</xref>). In digitally mediated environments, where communication channels are constrained or altered by technology, such dynamic, cross-modal modeling may be essential for explaining systematic differences between analog and digital collaboration.</p>
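<p>As a generic illustration of this idea, the following sketch classifies a hypothetical dyad-level condition from connectivity features using a cross-validated support vector machine. It is not the pipeline of Cao et al. (2022) or of any included study; the data are synthetic and the feature and label definitions are placeholders.</p>
<code language="Python">
# Illustrative sketch: classifying a dyad-level condition (e.g., analog vs.
# digitally mediated interaction) from connectivity features. Data, labels,
# and feature counts are synthetic placeholders, not results from any study.
import numpy as np
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

rng = np.random.default_rng(3)

n_dyads, n_features = 40, 30          # e.g., 30 inter-brain connectivity values per dyad
X = rng.standard_normal((n_dyads, n_features))
y = rng.integers(0, 2, size=n_dyads)  # 0 = analog, 1 = digital (hypothetical coding)

# Standardize features, then fit a linear SVM; evaluate with stratified
# cross-validation, advisable given the small samples typical of hyperscanning.
clf = make_pipeline(StandardScaler(), SVC(kernel="linear", C=1.0))
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
scores = cross_val_score(clf, X, y, cv=cv)
print(f"mean cross-validated accuracy: {scores.mean():.2f}")
</code>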
<p>A promising direction involves multimodal, physiology-informed machine learning that jointly integrates neural signals (EEG, fNIRS), autonomic measures (ECG, EDA), behavioral indicators (e.g., gaze, EMG), and contextual information into a shared, multidimensional representational space (<xref ref-type="bibr" rid="B18">Codina et al., 2025</xref>). Most current fusion strategies rely on methods such as feature concatenation or decision-level fusion, whereas more powerful approaches (e.g., joint independent component analysis) remain underused despite their ability to reveal latent, cross-domain structure (<xref ref-type="bibr" rid="B18">Codina et al., 2025</xref>). Embedding physiological priors, such as neurovascular coupling models or autonomic response patterns, directly into preprocessing and fusion could push hyperscanning beyond simple synchrony indices toward richer, context-aware models of collaborative interaction. Progress in this direction is tightly coupled to the availability of open-source, high-quality datasets. As highlighted in a recent overview of human synchronization datasets by <xref ref-type="bibr" rid="B89">Velletaz et al. (2025)</xref>, multimodal data remain scattered and underutilized. Expanding accessible, well-curated datasets will be critical for enabling advanced analytic developments and for refining our understanding of the fine-grained, interacting cognitive processes that support digitally mediated collaboration.</p>
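<p>To illustrate the difference between these fusion strategies, the following sketch contrasts simple feature concatenation with a canonical correlation analysis that projects two modalities onto a shared low-dimensional subspace. Canonical correlation analysis is used here only as an accessible stand-in for richer joint decompositions such as joint independent component analysis; the data are synthetic and all dimensions are arbitrary.</p>
<code language="Python">
# Illustrative sketch of two fusion strategies for dyad-level features:
# (1) simple feature concatenation and (2) a CCA-based shared subspace.
# CCA stands in here for richer joint decompositions (e.g., joint ICA);
# all data are synthetic and the feature counts are arbitrary.
import numpy as np
from sklearn.cross_decomposition import CCA

rng = np.random.default_rng(4)
n_dyads = 40
eeg_features = rng.standard_normal((n_dyads, 25))    # e.g., inter-brain EEG metrics
fnirs_features = rng.standard_normal((n_dyads, 15))  # e.g., inter-brain fNIRS metrics

# Strategy 1: feature concatenation (the common early-fusion baseline).
fused_concat = np.hstack([eeg_features, fnirs_features])
print("concatenated feature matrix:", fused_concat.shape)

# Strategy 2: project both modalities onto maximally correlated components,
# yielding a low-dimensional shared representation of each dyad.
cca = CCA(n_components=3)
eeg_shared, fnirs_shared = cca.fit_transform(eeg_features, fnirs_features)
shared_representation = np.hstack([eeg_shared, fnirs_shared])
print("CCA-based shared representation:", shared_representation.shape)
</code>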
</sec>
<sec>
<label>4.5</label>
<title>Limitations</title>
<p>Several limitations of the present review should be acknowledged. First, by focusing on collaborative tasks with digital components, we excluded hyperscanning studies centered, for example, on imitation, purely economic exchange paradigms, or shared attention. The resulting distribution of paradigms therefore does not reflect the full landscape of hyperscanning research. Nonetheless, even within this constrained scope, we identified several more open-ended and applied designs, suggesting a broader trend toward ecologically valid applications. Second, we deliberately excluded fMRI and MEG to focus on modalities amenable to mobile and applied use. One could argue, however, that fMRI and MEG hyperscanning inherently involves digital elements by design, making these modalities relevant to digital interaction as well. The associated living database is intended to be extended over time to incorporate such studies, enabling a more comprehensive overview. Notably, although the present review is static, the InterBrainDB platform is designed as a living, open-source resource. New studies can be added and re-labeled over time, and analyses can be replicated with different inclusion filters (e.g., adding modalities or relaxing collaboration criteria). In this way, some of the limitations of the present synthesis, particularly with respect to coverage and modality scope, can be progressively mitigated as the database grows.</p>
</sec>
</sec>
<sec sec-type="conclusion" id="s5">
<label>5</label>
<title>Conclusion</title>
<p>This systematic review synthesizes hyperscanning research on digitally mediated collaboration and reveals a field that is expanding in scope, albeit still constrained by methodological conservatism. Across 45 studies, most relied on unimodal EEG or fNIRS, functional connectivity analyses, and tasks targeting executive and social cognition, while multimodal measurements, effective connectivity, and machine-learning approaches remained rare. Remote digital collaboration was typically operationalized through study designs that isolate or reconfigure interaction channels, highlighting the need for systematic comparisons across graded levels of digitalization. Such comparisons are crucial for investigating how specific technological affordances, altering interaction channels and consequently social cues, shape interpersonal neural, physiological, and behavioral dynamics.</p>
<p>In summary, these patterns highlight both the adaptability of hyperscanning to emerging technological contexts and the limitations that prevent the field from accessing deeper mechanistic explanations of digitally mediated interaction and collaboration. To accelerate progress, we outline reporting practices aimed at improving transparency, reproducibility, and cross-study comparability, and introduce InterBrainDB, a living open-access resource for organizing emerging hyperscanning studies across modalities, paradigms, and analytic approaches.</p>
<p>Future work will benefit from greater multimodal integration, physiologically grounded data fusion strategies, and machine-learning models capable of capturing non-linear and temporally evolving dependencies across brains, bodies, and digital environments. Progress also requires a more fine-grained coverage of cognitive processes to distinguish how each subprocess unfolds within digitally mediated collaboration.</p>
<p>Advancing these methodological and conceptual frontiers is essential for building comprehensive, ecologically grounded models of collaborative behavior in the digital age. Such models will allow cognitive neuroscience to move beyond mapping synchrony toward understanding the mechanisms by which humans coordinate through and with technology.</p></sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>Publicly available datasets were analyzed in this study. This data can be found at: <ext-link ext-link-type="uri" xlink:href="https://websites.fraunhofer.de/interbraindb/">https://websites.fraunhofer.de/interbraindb/</ext-link>.</p>
</sec>
<sec sec-type="author-contributions" id="s7">
<title>Author contributions</title>
<p>AV: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Software, Validation, Visualization, Writing &#x02013; original draft, Writing &#x02013; review &#x00026; editing. A-MB: Methodology, Project administration, Supervision, Validation, Writing &#x02013; original draft, Writing &#x02013; review &#x00026; editing. MV: Funding acquisition, Methodology, Project administration, Resources, Supervision, Validation, Writing &#x02013; original draft, Writing &#x02013; review &#x00026; editing.</p>
</sec>
<ack><title>Acknowledgments</title><p>We would like to thank all colleagues who provided additional literature suggestions, thereby contributing to the comprehensiveness of this review. We would further like to thank Meredith Sprengel for her advice on conducting a comprehensive literature review. We acknowledge the use of generative language models (GPT-4, GPT-5), DeepL, and Grammarly during the writing process of this manuscript.</p></ack>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
<p>The authors A-MB and MV declared that they were editorial board members of Frontiers at the time of submission. This had no impact on the peer review process and the final decision.</p>
</sec>
<sec sec-type="ai-statement" id="s9">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was used in the creation of this manuscript. We acknowledge the use of generative language models (GPT-4, GPT-5), DeepL, and Grammarly during the writing process of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="s11">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fnrgo.2026.1756956/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fnrgo.2026.1756956/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Astolfi</surname> <given-names>L.</given-names></name> <name><surname>Toppi</surname> <given-names>J.</given-names></name> <name><surname>Ciaramidaro</surname> <given-names>A.</given-names></name> <name><surname>Vogel</surname> <given-names>P.</given-names></name> <name><surname>Freitag</surname> <given-names>C. M.</given-names></name> <name><surname>Siniatchkin</surname> <given-names>M.</given-names></name></person-group> (<year>2020</year>). <article-title>Raising the bar: can dual scanning improve our understanding of joint action?</article-title> <source>NeuroImage</source> <volume>216</volume>:<fpage>116813</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2020.116813</pub-id><pub-id pub-id-type="pmid">32276053</pub-id></mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bae</surname> <given-names>C.</given-names></name> <name><surname>Montello</surname> <given-names>D.</given-names></name> <name><surname>Hegarty</surname> <given-names>M.</given-names></name></person-group> (<year>2024</year>). <article-title>Wayfinding in pairs: comparing the planning and navigation performance of dyads and individuals in a real-world environment</article-title>. <source>Cogn. Res. Principles Implications</source> <volume>9</volume>:<fpage>40</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s41235-024-00563-9</pub-id><pub-id pub-id-type="pmid">38902485</pub-id></mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Balconi</surname> <given-names>M.</given-names></name> <name><surname>Angioletti</surname> <given-names>L.</given-names></name></person-group> (<year>2023</year>). <article-title>Hemodynamic and electrophysiological biomarkers of interpersonal tuning during interoceptive synchronization</article-title>. <source>Information</source> <volume>14</volume>:<fpage>289</fpage>. doi: <pub-id pub-id-type="doi">10.3390/info14050289</pub-id></mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Balconi</surname> <given-names>M.</given-names></name> <name><surname>Angioletti</surname> <given-names>L.</given-names></name> <name><surname>Cassioli</surname> <given-names>F.</given-names></name></person-group> (<year>2023</year>). <article-title>Hyperscanning EEG paradigm applied to remote vs. face-to-face learning in managerial contexts: which is better?</article-title> <source>Brain Sci.</source> <volume>13</volume>:<fpage>356</fpage>. doi: <pub-id pub-id-type="doi">10.3390/brainsci13020356</pub-id><pub-id pub-id-type="pmid">36831899</pub-id></mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Balconi</surname> <given-names>M.</given-names></name> <name><surname>Crivelli</surname> <given-names>D.</given-names></name> <name><surname>Cassioli</surname> <given-names>F.</given-names></name></person-group> (<year>2022</year>). <article-title>&#x0201C;We will let you know&#x0201D;: an assessment of digital vs. face-to-face job interviews via EEG connectivity analysis</article-title>. <source>Information</source> <volume>13</volume>:<fpage>312</fpage>. doi: <pub-id pub-id-type="doi">10.3390/info13070312</pub-id></mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Balters</surname> <given-names>S.</given-names></name> <name><surname>Baker</surname> <given-names>J. M.</given-names></name> <name><surname>Hawthorne</surname> <given-names>G.</given-names></name> <name><surname>Reiss</surname> <given-names>A. L.</given-names></name></person-group> (<year>2020</year>). <article-title>Capturing human interaction in the virtual age: a perspective on the future of fNIRS hyperscanning</article-title>. <source>Front. Hum. Neurosci.</source> <volume>14</volume>:<fpage>588494</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnhum.2020.588494</pub-id><pub-id pub-id-type="pmid">33240067</pub-id></mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Balters</surname> <given-names>S.</given-names></name> <name><surname>Baker</surname> <given-names>J. M.</given-names></name> <name><surname>Hawthorne</surname> <given-names>G.</given-names></name> <name><surname>Reiss</surname> <given-names>A. L.</given-names></name></person-group> (<year>2021</year>). <article-title>Inter-brain synchrony and innovation in a zoom world using analog and digital manipulatives</article-title>. <source>Design Think. Res.</source> <fpage>9</fpage>&#x02013;<lpage>32</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-030-76324-4_2</pub-id></mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Balters</surname> <given-names>S.</given-names></name> <name><surname>Miller</surname> <given-names>J. G.</given-names></name> <name><surname>Li</surname> <given-names>R.</given-names></name> <name><surname>Hawthorne</surname> <given-names>G.</given-names></name> <name><surname>Reiss</surname> <given-names>A. L.</given-names></name></person-group> (<year>2023</year>). <article-title>Virtual (zoom) interactions alter conversational behavior and interbrain coherence</article-title>. <source>J. Neurosci.</source> <volume>43</volume>, <fpage>2568</fpage>&#x02013;<lpage>2578</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.1401-22.2023</pub-id><pub-id pub-id-type="pmid">36868852</pub-id></mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Barde</surname> <given-names>A.</given-names></name> <name><surname>Gumilar</surname> <given-names>I.</given-names></name> <name><surname>Hayati</surname> <given-names>A. F.</given-names></name> <name><surname>Dey</surname> <given-names>A.</given-names></name> <name><surname>Lee</surname> <given-names>G.</given-names></name> <name><surname>Billinghurst</surname> <given-names>M.</given-names></name></person-group> (<year>2020</year>). <article-title>A review of hyperscanning and its use in virtual environments</article-title>. <source>Informatics</source> <volume>7</volume>:<fpage>55</fpage>. doi: <pub-id pub-id-type="doi">10.3390/informatics7040055</pub-id></mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bie&#x00144;kiewicz</surname> <given-names>M. M. N.</given-names></name> <name><surname>Smykovskyi</surname> <given-names>A. P.</given-names></name> <name><surname>Olugbade</surname> <given-names>T.</given-names></name> <name><surname>Janaqi</surname> <given-names>S.</given-names></name> <name><surname>Camurri</surname> <given-names>A.</given-names></name> <name><surname>Bianchi-Berthouze</surname> <given-names>N.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Bridging the gap between emotion and joint action</article-title>. <source>Neurosci. Biobehav. Rev.</source> <volume>131</volume>, <fpage>806</fpage>&#x02013;<lpage>833</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neubiorev.2021.08.014</pub-id><pub-id pub-id-type="pmid">34418437</pub-id></mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cao</surname> <given-names>J.</given-names></name> <name><surname>Garro</surname> <given-names>E. M.</given-names></name> <name><surname>Zhao</surname> <given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>EEG/fNIRS based workload classification using functional brain connectivity and machine learning</article-title>. <source>Sensors</source> <volume>22</volume>:<fpage>7623</fpage>. doi: <pub-id pub-id-type="doi">10.3390/s22197623</pub-id><pub-id pub-id-type="pmid">36236725</pub-id></mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Carollo</surname> <given-names>A.</given-names></name> <name><surname>Esposito</surname> <given-names>G.</given-names></name></person-group> (<year>2024</year>). <article-title>Hyperscanning literature after two decades of neuroscientific research: a scientometric review</article-title>. <source>Neuroscience</source> <volume>551</volume>, <fpage>345</fpage>&#x02013;<lpage>354</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroscience.2024.05.045</pub-id><pub-id pub-id-type="pmid">38866073</pub-id></mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cheng</surname> <given-names>S.</given-names></name> <name><surname>Wang</surname> <given-names>J.</given-names></name> <name><surname>Shen</surname> <given-names>X.</given-names></name> <name><surname>Chen</surname> <given-names>Y.</given-names></name> <name><surname>Dey</surname> <given-names>A.</given-names></name></person-group> (<year>2022</year>). <article-title>Collaborative eye tracking based code review through real-time shared gaze visualization</article-title>. <source>Front. Comput. Sci.</source> <volume>16</volume>:<fpage>163704</fpage>. doi: <pub-id pub-id-type="doi">10.1007/s11704-020-0422-1</pub-id></mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cheng</surname> <given-names>X.</given-names></name> <name><surname>Pan</surname> <given-names>Y.</given-names></name> <name><surname>Hu</surname> <given-names>Y.</given-names></name> <name><surname>Hu</surname> <given-names>Y.</given-names></name></person-group> (<year>2019</year>). <article-title>Coordination elicits synchronous brain activity between co-actors: frequency ratio matters</article-title>. <source>Front. Neurosci.</source> <volume>13</volume>:<fpage>1071</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2019.01071</pub-id><pub-id pub-id-type="pmid">31680812</pub-id></mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chuang</surname> <given-names>C.-H.</given-names></name> <name><surname>Hsu</surname> <given-names>H.-C.</given-names></name></person-group> (<year>2023</year>). <article-title>Pseudo-mutual gazing enhances interbrain synchrony during remote joint attention tasking</article-title>. <source>Brain Behav.</source> <volume>13</volume>:<fpage>e3181</fpage>. doi: <pub-id pub-id-type="doi">10.1002/brb3.3181</pub-id><pub-id pub-id-type="pmid">37496332</pub-id></mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chuang</surname> <given-names>T.-M.</given-names></name> <name><surname>Peng</surname> <given-names>P.-C.</given-names></name> <name><surname>Su</surname> <given-names>Y.-K.</given-names></name> <name><surname>Lin</surname> <given-names>S.-H.</given-names></name> <name><surname>Tseng</surname> <given-names>Y.-L.</given-names></name></person-group> (<year>2024</year>). <article-title>Exploring inter-brain electroencephalogram patterns for social cognitive assessment during jigsaw puzzle solving</article-title>. <source>IEEE Trans. Neural Syst. Rehabil. Eng.</source> <volume>32</volume>, <fpage>422</fpage>&#x02013;<lpage>430</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TNSRE.2024.3352036</pub-id><pub-id pub-id-type="pmid">38198273</pub-id></mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ciaramidaro</surname> <given-names>A.</given-names></name> <name><surname>Toppi</surname> <given-names>J.</given-names></name> <name><surname>Vogel</surname> <given-names>P.</given-names></name> <name><surname>Freitag</surname> <given-names>C. M.</given-names></name> <name><surname>Siniatchkin</surname> <given-names>M.</given-names></name> <name><surname>Astolfi</surname> <given-names>L.</given-names></name></person-group> (<year>2024</year>). <article-title>Synergy of the mirror neuron system and the mentalizing system in a single brain and between brains during joint actions</article-title>. <source>NeuroImage</source> <volume>299</volume>:<fpage>120783</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2024.120783</pub-id><pub-id pub-id-type="pmid">39187218</pub-id></mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Codina</surname> <given-names>T.</given-names></name> <name><surname>Blankertz</surname> <given-names>B.</given-names></name> <name><surname>L&#x000FC;hmann</surname> <given-names>A.</given-names></name> von</person-group> (<year>2025</year>). <article-title>Multimodal fNIRS-EEG sensor fusion: review of data-driven methods and perspective for naturalistic brain imaging</article-title>. <source>Imag. Neurosci.</source> <volume>3</volume>:<fpage>974</fpage>. doi: <pub-id pub-id-type="doi">10.1162/IMAG.a.974</pub-id><pub-id pub-id-type="pmid">41211102</pub-id></mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cross</surname> <given-names>Z. R.</given-names></name> <name><surname>Chatburn</surname> <given-names>A.</given-names></name> <name><surname>Melberzs</surname> <given-names>L.</given-names></name> <name><surname>Temby</surname> <given-names>P.</given-names></name> <name><surname>Pomeroy</surname> <given-names>D.</given-names></name> <name><surname>Schlesewsky</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Task-related, intrinsic oscillatory and aperiodic neural activity predict performance in naturalistic team-based training scenarios</article-title>. <source>Sci. Rep.</source> <volume>12</volume>:<fpage>16172</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-022-20704-8</pub-id><pub-id pub-id-type="pmid">36171478</pub-id></mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Czeszumski</surname> <given-names>A.</given-names></name> <name><surname>Eustergerling</surname> <given-names>S.</given-names></name> <name><surname>Lang</surname> <given-names>A.</given-names></name> <name><surname>Menrath</surname> <given-names>D.</given-names></name> <name><surname>Gerstenberger</surname> <given-names>M.</given-names></name> <name><surname>Schuberth</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Hyperscanning: a valid method to study neural inter-brain underpinnings of social interaction</article-title>. <source>Front. Hum. Neurosci.</source> <volume>14</volume>:<fpage>39</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnhum.2020.00039</pub-id><pub-id pub-id-type="pmid">32180710</pub-id></mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Czeszumski</surname> <given-names>A.</given-names></name> <name><surname>Liang</surname> <given-names>S. H.</given-names></name> <name><surname>Dikker</surname> <given-names>S.</given-names></name> <name><surname>K&#x000F6;nig</surname> <given-names>P.</given-names></name> <name><surname>Lee</surname> <given-names>C. P.</given-names></name> <name><surname>Koole</surname> <given-names>S. L.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Cooperative behavior evokes interbrain synchrony in the prefrontal and temporoparietal cortex: a systematic review and meta-analysis of fNIRS hyperscanning studies</article-title>. <source>eNeuro.</source> 9:ENEURO.0268&#x02013;21.2022. doi: <pub-id pub-id-type="doi">10.1523/ENEURO.0268-21.2022</pub-id><pub-id pub-id-type="pmid">35365502</pub-id></mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Delaherche</surname> <given-names>E.</given-names></name> <name><surname>Dumas</surname> <given-names>G.</given-names></name> <name><surname>Nadel</surname> <given-names>J.</given-names></name> <name><surname>Chetouani</surname> <given-names>M.</given-names></name></person-group> (<year>2015</year>). <article-title>Automatic measure of imitation during social interaction: a behavioral and hyperscanning-EEG benchmark</article-title>. <source>Pattern Recogn. Lett.</source> <volume>66</volume>, <fpage>118</fpage>&#x02013;<lpage>126</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.patrec.2014.09.002</pub-id></mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Desch&#x000EA;nes</surname> <given-names>A.-A.</given-names></name></person-group> (<year>2024</year>). <article-title>Digital literacy, the use of collaborative technologies, and perceived social proximity in a hybrid work environment: technology as a social binder</article-title>. <source>Comput. Hum. Behav. Rep.</source> <volume>13</volume>:<fpage>100351</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.chbr.2023.100351</pub-id></mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dikker</surname> <given-names>S.</given-names></name> <name><surname>Michalareas</surname> <given-names>G.</given-names></name> <name><surname>Oostrik</surname> <given-names>M.</given-names></name> <name><surname>Serafimaki</surname> <given-names>A.</given-names></name> <name><surname>Kahraman</surname> <given-names>H. M.</given-names></name> <name><surname>Struiksma</surname> <given-names>M. E.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Crowdsourcing neuroscience: inter-brain coupling during face-to-face interactions outside the laboratory</article-title>. <source>NeuroImage</source> <volume>227</volume>:<fpage>117436</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2020.117436</pub-id><pub-id pub-id-type="pmid">33039619</pub-id></mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dikker</surname> <given-names>S.</given-names></name> <name><surname>Wan</surname> <given-names>L.</given-names></name> <name><surname>Davidesco</surname> <given-names>I.</given-names></name> <name><surname>Kaggen</surname> <given-names>L.</given-names></name> <name><surname>Oostrik</surname> <given-names>M.</given-names></name> <name><surname>McClintock</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Brain-to-brain synchrony tracks real-world dynamic group interactions in the classroom</article-title>. <source>Curr. Biol.</source> <volume>27</volume>, <fpage>1375</fpage>&#x02013;<lpage>1380</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cub.2017.04.002</pub-id><pub-id pub-id-type="pmid">28457867</pub-id></mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dissanayake</surname> <given-names>T.</given-names></name> <name><surname>Muller</surname> <given-names>K.-R.</given-names></name> <name><surname>von Luhmann</surname> <given-names>A.</given-names></name></person-group> (<year>2025</year>). <article-title>Deep learning from diffuse optical oximetry time-series: an fNIRS-focused review Q18 of recent advancements and future directions</article-title>. <source>IEEE Rev. Biomed. Eng.</source> 1&#x02013;22. doi: <pub-id pub-id-type="doi">10.1109/RBME.2025.3617858</pub-id></mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Du</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>L.</given-names></name> <name><surname>Hung</surname> <given-names>J.-L.</given-names></name> <name><surname>Li</surname> <given-names>H.</given-names></name> <name><surname>Tang</surname> <given-names>H.</given-names></name> <name><surname>Xie</surname> <given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>Understand group interaction and cognitive state in online collaborative problem solving: leveraging brain-to-brain synchrony data</article-title>. <source>Int. J. Educ. Technol. Higher Educ.</source> <volume>19</volume>:<fpage>52</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s41239-022-00356-4</pub-id></mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Duane</surname> <given-names>T. D.</given-names></name> <name><surname>Behrendt</surname> <given-names>T.</given-names></name></person-group> (<year>1965</year>). <article-title>Extrasensory electroencephalographic induction between identical twins</article-title>. <source>Science</source> <volume>150</volume>:<fpage>367</fpage>. doi: <pub-id pub-id-type="doi">10.1126/science.150.3694.367</pub-id><pub-id pub-id-type="pmid">5890891</pub-id></mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fan</surname> <given-names>S.</given-names></name> <name><surname>Dal Monte</surname> <given-names>O.</given-names></name> <name><surname>Chang</surname> <given-names>S. W. C.</given-names></name></person-group> (<year>2021</year>). <article-title>Levels of naturalism in social neuroscience research</article-title>. <source>iScience</source> <volume>24</volume>:<fpage>102702</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.isci.2021.102702</pub-id><pub-id pub-id-type="pmid">34258547</pub-id></mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fang</surname> <given-names>C. M.</given-names></name> <name><surname>Marvez</surname> <given-names>G. R.</given-names></name> <name><surname>ElHaouij</surname> <given-names>N.</given-names></name> <name><surname>Picard</surname> <given-names>R.</given-names></name></person-group> (<year>2022</year>). &#x0201C;Cardiac arrest: evaluating the role of biosignals in gameplay strategies and players&#x00027; physiological synchrony in social deception games,&#x0201D; <italic>CHI Conference on Human Factors in Computing Systems Extended Abstracts, New Orleans, LA, 29 April</italic>&#x02212;<italic>05 May 2022</italic> (New York, NY: ACM), <fpage>1</fpage>&#x02013;<lpage>7</lpage>. doi: <pub-id pub-id-type="doi">10.1145/3491101.3519670</pub-id></mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ferencov&#x000E1;</surname> <given-names>N.</given-names></name> <name><surname>Vi&#x00161;novcov&#x000E1;</surname> <given-names>Z.</given-names></name> <name><surname>Bona Olexov&#x000E1;</surname> <given-names>L.</given-names></name> <name><surname>Tonhajzerov&#x000E1;</surname> <given-names>I.</given-names></name></person-group> (<year>2021</year>). <article-title>Eye pupil - a window into central autonomic regulation via emotional/cognitive processing</article-title>. <source>Physiol. Res.</source> <volume>70</volume>(<supplement>Suppl. 4</supplement>), <fpage>S669</fpage>&#x02013;<lpage>S682</lpage>. doi: <pub-id pub-id-type="doi">10.33549/physiolres.934749</pub-id><pub-id pub-id-type="pmid">35199551</pub-id></mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Findik-Co&#x0015F;kun&#x000E7;ay</surname> <given-names>D.</given-names></name> <name><surname>&#x000C7;akir</surname> <given-names>M. P.</given-names></name></person-group> (<year>2022</year>). <article-title>An investigation of the relationship between joint visual attention and product quality in collaborative business process modeling: a dual eye-tracking study</article-title>. <source>Softw. Syst. Model.</source> <volume>21</volume>, <fpage>2429</fpage>&#x02013;<lpage>2460</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10270-022-00974-6</pub-id></mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fl&#x000F6;sch</surname> <given-names>K.-P.</given-names></name> <name><surname>Flaisch</surname> <given-names>T.</given-names></name> <name><surname>Imhof</surname> <given-names>M. A.</given-names></name> <name><surname>Schupp</surname> <given-names>H. T.</given-names></name></person-group> (<year>2024</year>). <article-title>Alpha/beta oscillations reveal cognitive and affective brain states associated with role taking in a dyadic cooperative game</article-title>. <source>Cereb. Cortex</source> <volume>34</volume>, <fpage>1</fpage>&#x02013;<lpage>16</lpage>. doi: <pub-id pub-id-type="doi">10.1093/cercor/bhad487</pub-id><pub-id pub-id-type="pmid">38100327</pub-id></mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fl&#x000F6;sch</surname> <given-names>K.-P.</given-names></name> <name><surname>Flaisch</surname> <given-names>T.</given-names></name> <name><surname>Imhof</surname> <given-names>M. A.</given-names></name> <name><surname>Schupp</surname> <given-names>H. T.</given-names></name></person-group> (<year>2024b</year>). <article-title>Dyadic cooperation with human and artificial agents: event-related potentials trace dynamic role taking during an interactive game</article-title>. <source>Psychophysiology</source> <volume>61</volume>:<fpage>e14433</fpage>. doi: <pub-id pub-id-type="doi">10.1111/psyp.14433</pub-id><pub-id pub-id-type="pmid">37681492</pub-id></mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Friston</surname> <given-names>K. J.</given-names></name></person-group> (<year>2011</year>). <article-title>Functional and effective connectivity: a review</article-title>. <source>Brain Connect.</source> <volume>1</volume>, <fpage>13</fpage>&#x02013;<lpage>36</lpage>. doi: <pub-id pub-id-type="doi">10.1089/brain.2011.0008</pub-id><pub-id pub-id-type="pmid">22432952</pub-id></mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Frith</surname> <given-names>C. D.</given-names></name> <name><surname>Frith</surname> <given-names>U.</given-names></name></person-group> (<year>2008</year>). <article-title>Implicit and explicit processes in social cognition</article-title>. <source>Neuron</source> <volume>60</volume>, <fpage>503</fpage>&#x02013;<lpage>510</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuron.2008.10.032</pub-id><pub-id pub-id-type="pmid">18995826</pub-id></mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gado</surname> <given-names>S.</given-names></name> <name><surname>Lingelbach</surname> <given-names>K.</given-names></name> <name><surname>Wirzberger</surname> <given-names>M.</given-names></name> <name><surname>&#x00026; Vukeli&#x00107;</surname> <given-names>M.</given-names></name></person-group> (<year>2023</year>). <article-title>Decoding mental effort in a quasi-realistic scenario: a feasibility study on multimodal data fusion and classification</article-title>. <source>Sensors</source> <volume>23</volume>:<fpage>6546</fpage>. doi: <pub-id pub-id-type="doi">10.3390/s23146546</pub-id><pub-id pub-id-type="pmid">37514840</pub-id></mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Grasso-Cladera</surname> <given-names>A.</given-names></name> <name><surname>Costa-Cordella</surname> <given-names>S.</given-names></name> <name><surname>Mattoli-S&#x000E1;nchez</surname> <given-names>J.</given-names></name> <name><surname>Vilina</surname> <given-names>E.</given-names></name> <name><surname>Santander</surname> <given-names>V.</given-names></name> <name><surname>Hiltner</surname> <given-names>S. E.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Embodied hyperscanning for studying social interaction: a scoping review of simultaneous brain and body measurements</article-title>. <source>Soc. Neurosci.</source> <volume>20</volume>, <fpage>163</fpage>&#x02013;<lpage>179</lpage>. doi: <pub-id pub-id-type="doi">10.1080/17470919.2024.2409758</pub-id><pub-id pub-id-type="pmid">39387663</pub-id></mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gugnowska</surname> <given-names>K.</given-names></name> <name><surname>Novembre</surname> <given-names>G.</given-names></name> <name><surname>Kohler</surname> <given-names>N.</given-names></name> <name><surname>Villringer</surname> <given-names>A.</given-names></name> <name><surname>Keller</surname> <given-names>P. E.</given-names></name> <name><surname>Sammler</surname> <given-names>D.</given-names></name></person-group> (<year>2022</year>). <article-title>Endogenous sources of interbrain synchrony in duetting pianists</article-title>. <source>Cereb. Cortex</source> <volume>32</volume>, <fpage>4110</fpage>&#x02013;<lpage>4127</lpage>. doi: <pub-id pub-id-type="doi">10.1093/cercor/bhab469</pub-id><pub-id pub-id-type="pmid">35029645</pub-id></mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gumilar</surname> <given-names>I.</given-names></name> <name><surname>Sareen</surname> <given-names>E.</given-names></name> <name><surname>Bell</surname> <given-names>R.</given-names></name> <name><surname>Stone</surname> <given-names>A.</given-names></name> <name><surname>Hayati</surname> <given-names>A.</given-names></name> <name><surname>Mao</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>A comparative study on inter-brain synchrony in real and virtual environments using hyperscanning</article-title>. <source>Comput. Graph.</source> <volume>94</volume>, <fpage>62</fpage>&#x02013;<lpage>75</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cag.2020.10.003</pub-id></mixed-citation>
</ref>
<ref id="B41">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hakim</surname> <given-names>U.</given-names></name> <name><surname>Felice</surname> <given-names>S.</given-names></name> <name><surname>de</surname> <given-names>Pinti, P.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Noah</surname> <given-names>J. A.</given-names></name> <name><surname>Ono</surname> <given-names>Y.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Quantification of inter-brain coupling: a review of current methods used in haemodynamic and electrophysiological hyperscanning studies</article-title>. <source>NeuroImage</source> <volume>280</volume>:<fpage>120354</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2023.120354</pub-id><pub-id pub-id-type="pmid">37666393</pub-id></mixed-citation>
</ref>
<ref id="B42">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hayati</surname> <given-names>A. F.</given-names></name> <name><surname>Barde</surname> <given-names>A.</given-names></name> <name><surname>Gumilar</surname> <given-names>I.</given-names></name> <name><surname>Momin</surname> <given-names>A.</given-names></name> <name><surname>Lee</surname> <given-names>G.</given-names></name> <name><surname>Chatburn</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Inter-brain synchrony in real-world and virtual reality search tasks using EEG hyperscanning</article-title>. <source>Front. Virtual Reality</source> <volume>6</volume>:<fpage>1469105</fpage>. doi: <pub-id pub-id-type="doi">10.3389/frvir.2025.1469105</pub-id></mixed-citation>
</ref>
<ref id="B43">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hayne</surname> <given-names>L.</given-names></name> <name><surname>Grant</surname> <given-names>T.</given-names></name> <name><surname>Hirshfield</surname> <given-names>L.</given-names></name> <name><surname>Carter</surname> <given-names>R. M.</given-names></name></person-group> (<year>2023</year>). <article-title>Friend or foe: classifying collaborative interactions using fNIRS</article-title>. <source>Front. Neuroergon.</source> <volume>4</volume>:<fpage>1265105</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnrgo.2023.1265105</pub-id><pub-id pub-id-type="pmid">38234488</pub-id></mixed-citation>
</ref>
<ref id="B44">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hoffmann</surname> <given-names>A.</given-names></name> <name><surname>Schellhorn</surname> <given-names>A.-M.</given-names></name> <name><surname>Ritter</surname> <given-names>M.</given-names></name> <name><surname>Sachse</surname> <given-names>P.</given-names></name> <name><surname>Maran</surname> <given-names>T.</given-names></name></person-group> (<year>2024</year>). <article-title>Blink synchronization increases over time and predicts problem-solving performance in virtual teams</article-title>. <source>Small Group Res.</source> <volume>55</volume>, <fpage>706</fpage>&#x02013;<lpage>728</lpage>. doi: <pub-id pub-id-type="doi">10.1177/10464964231195618</pub-id></mixed-citation>
</ref>
<ref id="B45">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hou</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>D.</given-names></name> <name><surname>Gan</surname> <given-names>X.</given-names></name> <name><surname>Hu</surname> <given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>Group polarization calls for group-level brain communication</article-title>. <source>NeuroImage</source> <volume>264</volume>:<fpage>119739</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2022.119739</pub-id><pub-id pub-id-type="pmid">36356821</pub-id></mixed-citation>
</ref>
<ref id="B46">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hu</surname> <given-names>Y.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Pan</surname> <given-names>Y.</given-names></name> <name><surname>Cheng</surname> <given-names>X.</given-names></name></person-group> (<year>2017</year>). <article-title>Brain-to-brain synchronization across two persons predicts mutual prosociality</article-title>. <source>Social Cogn. Affect. Neurosci.</source> <volume>12</volume>, <fpage>1835</fpage>&#x02013;<lpage>1844</lpage>. doi: <pub-id pub-id-type="doi">10.1093/scan/nsx118</pub-id><pub-id pub-id-type="pmid">29040766</pub-id></mixed-citation>
</ref>
<ref id="B47">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Konvalinka</surname> <given-names>I.</given-names></name> <name><surname>Sebanz</surname> <given-names>N.</given-names></name> <name><surname>Knoblich</surname> <given-names>G.</given-names></name></person-group> (<year>2023</year>). <article-title>The role of reciprocity in dynamic interpersonal coordination of physiological rhythms</article-title>. <source>Cognition</source> <volume>230</volume>:<fpage>105307</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cognition.2022.105307</pub-id><pub-id pub-id-type="pmid">36272361</pub-id></mixed-citation>
</ref>
<ref id="B48">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>K&#x000FC;tt</surname> <given-names>G. H.</given-names></name> <name><surname>Lee</surname> <given-names>K.</given-names></name> <name><surname>Hardacre</surname> <given-names>E.</given-names></name> <name><surname>Papoutsaki</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>&#x0201C;Eye-write,&#x0201D;</article-title> in <source>Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems, Glasgow, Scotland, UK, 04&#x02013;09 May 2019</source> (<publisher-loc>New York, NY</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name>), <fpage>1</fpage>&#x02013;<lpage>12</lpage>. doi: <pub-id pub-id-type="doi">10.1145/3290605.3300727</pub-id></mixed-citation>
</ref>
<ref id="B49">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Le Bars</surname> <given-names>S.</given-names></name> <name><surname>Devaux</surname> <given-names>A.</given-names></name> <name><surname>Nevidal</surname> <given-names>T.</given-names></name> <name><surname>Chambon</surname> <given-names>V.</given-names></name> <name><surname>Pacherie</surname> <given-names>E.</given-names></name></person-group> (<year>2020</year>). <article-title>Agents&#x00027; pivotality and reward fairness modulate sense of agency in cooperative joint action</article-title>. <source>Cognition</source> <volume>195</volume>:<fpage>104117</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cognition.2019.104117</pub-id><pub-id pub-id-type="pmid">31751814</pub-id></mixed-citation>
</ref>
<ref id="B50">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Leahy</surname> <given-names>O.</given-names></name> <name><surname>Kontaris</surname> <given-names>E.</given-names></name> <name><surname>Gunasekara</surname> <given-names>N.</given-names></name> <name><surname>Hirsch</surname> <given-names>J.</given-names></name> <name><surname>Tachtsidis</surname> <given-names>I.</given-names></name></person-group> (<year>2025</year>). <article-title>Environmental effects on inter-brain coupling: a systematic review</article-title>. <source>Front. Hum. Neurosci.</source> <volume>19</volume>:<fpage>1627457</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnhum.2025.1627457</pub-id><pub-id pub-id-type="pmid">40822293</pub-id></mixed-citation>
</ref>
<ref id="B51">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>L&#x000E9;n&#x000E9;</surname> <given-names>P.</given-names></name> <name><surname>Karran</surname> <given-names>A. J.</given-names></name> <name><surname>Labont&#x000E9;-Lemoyne</surname> <given-names>E.</given-names></name> <name><surname>S&#x000E9;n&#x000E9;cal</surname> <given-names>S.</given-names></name> <name><surname>Fredette</surname> <given-names>M.</given-names></name> <name><surname>Johnson</surname> <given-names>K. J.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Is there collaboration specific neurophysiological activation during collaborative task activity? An analysis of brain responses using electroencephalography and hyperscanning</article-title>. <source>Brain Behav.</source> <volume>11</volume>:<fpage>e2270</fpage>. doi: <pub-id pub-id-type="doi">10.1002/brb3.2270</pub-id><pub-id pub-id-type="pmid">34617691</pub-id></mixed-citation>
</ref>
<ref id="B52">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>R.</given-names></name> <name><surname>Yang</surname> <given-names>D.</given-names></name> <name><surname>Fang</surname> <given-names>F.</given-names></name> <name><surname>Hong</surname> <given-names>K.-S.</given-names></name> <name><surname>Reiss</surname> <given-names>A. L.</given-names></name> <name><surname>Zhang</surname> <given-names>Y.</given-names></name></person-group> (<year>2022</year>). <article-title>Concurrent fNIRS and EEG for brain function investigation: a systematic, methodology-focused review</article-title>. <source>Sensors</source> <volume>22</volume>:<fpage>5865</fpage>. doi: <pub-id pub-id-type="doi">10.3390/s22155865</pub-id><pub-id pub-id-type="pmid">35957421</pub-id></mixed-citation>
</ref>
<ref id="B53">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liberati</surname> <given-names>A.</given-names></name> <name><surname>Altman</surname> <given-names>D. G.</given-names></name> <name><surname>Tetzlaff</surname> <given-names>J.</given-names></name> <name><surname>Mulrow</surname> <given-names>C.</given-names></name> <name><surname>G&#x000F8;tzsche</surname> <given-names>P. C.</given-names></name> <name><surname>Ioannidis</surname> <given-names>J. P. A.</given-names></name> <etal/></person-group>. (<year>2009</year>). <article-title>The PRISMA statement for reporting systematic reviews and meta-analyses of studies that evaluate healthcare interventions: explanation and elaboration</article-title>. <source>BMJ</source> <volume>339</volume>:<fpage>b2700</fpage>. doi: <pub-id pub-id-type="doi">10.1136/bmj.b2700</pub-id><pub-id pub-id-type="pmid">19622552</pub-id></mixed-citation>
</ref>
<ref id="B54">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>J.</given-names></name> <name><surname>Zhang</surname> <given-names>R.</given-names></name> <name><surname>Geng</surname> <given-names>B.</given-names></name> <name><surname>Zhang</surname> <given-names>T.</given-names></name> <name><surname>Di</surname> <given-names>Y.uan, Otani, S.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name></person-group> (<year>2019</year>). <article-title>Interplay between prior knowledge and communication mode on teaching effectiveness: interpersonal neural synchronization as a neural marker</article-title>. <source>NeuroImage</source> <volume>193</volume>, <fpage>93</fpage>&#x02013;<lpage>102</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2019.03.004</pub-id><pub-id pub-id-type="pmid">30851445</pub-id></mixed-citation>
</ref>
<ref id="B55">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>Y.</given-names></name> <name><surname>Zhong</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>J.</given-names></name> <name><surname>Zhang</surname> <given-names>C.</given-names></name> <name><surname>Meng</surname> <given-names>Y.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Favoritism or bias? Cooperation and competition under different intergroup relationships: evidence from EEG hyperscanning</article-title>. <source>Cereb. Cortex.</source> <volume>34</volume>:<fpage>bhae131</fpage>. doi: <pub-id pub-id-type="doi">10.1093/cercor/bhae131</pub-id><pub-id pub-id-type="pmid">38566514</pub-id></mixed-citation>
</ref>
<ref id="B56">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lu</surname> <given-names>K.</given-names></name> <name><surname>Yu</surname> <given-names>T.</given-names></name> <name><surname>Hao</surname> <given-names>N.</given-names></name></person-group> (<year>2020</year>). <article-title>Creating while taking turns, the choice to unlocking group creative potential</article-title>. <source>NeuroImage</source> <volume>219</volume>:<fpage>117025</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2020.117025</pub-id><pub-id pub-id-type="pmid">32512127</pub-id></mixed-citation>
</ref>
<ref id="B57">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Luebstorf</surname> <given-names>S.</given-names></name> <name><surname>Allen</surname> <given-names>J. A.</given-names></name> <name><surname>Eden</surname> <given-names>E.</given-names></name> <name><surname>Kramer</surname> <given-names>W. S.</given-names></name> <name><surname>Reiter-Palmon</surname> <given-names>R.</given-names></name> <name><surname>Lehmann-Willenbrock</surname> <given-names>N.</given-names></name></person-group> (<year>2023</year>). <article-title>Digging into &#x0201C;zoom fatigue&#x0201D;: a qualitative exploration of remote work challenges and virtual meeting stressors</article-title>. <source>Merits</source> <volume>3</volume>, <fpage>151</fpage>&#x02013;<lpage>166</lpage>. doi: <pub-id pub-id-type="doi">10.3390/merits3010010</pub-id></mixed-citation>
</ref>
<ref id="B58">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>L&#x000FC;hmann</surname> <given-names>A.</given-names></name> <name><surname>M&#x000FC;ller</surname> <given-names>K.-R.</given-names></name></person-group> (<year>2017</year>). <article-title>&#x0201C;Why build an integrated EEG-NIRS? About the advantages of hybrid bio-acquisition hardware&#x0201E;&#x0201D;</article-title> in <source>Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference 2017</source>, (<publisher-loc>Jeju</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>4475</fpage>&#x02013;<lpage>4478</lpage>. doi: <pub-id pub-id-type="doi">10.1109/EMBC.2017.8037850</pub-id><pub-id pub-id-type="pmid">29060891</pub-id></mixed-citation>
</ref>
<ref id="B59">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>L&#x000FC;hmann</surname> <given-names>A.</given-names></name> <name><surname>von</surname> <given-names>Li, X.</given-names></name> <name><surname>M&#x000FC;ller</surname> <given-names>K.-R.</given-names></name> <name><surname>Boas</surname> <given-names>D. A.</given-names></name> <name><surname>Y&#x000FC;cel</surname> <given-names>M. A.</given-names></name></person-group> (<year>2020</year>). <article-title>Improved physiological noise regression in fNIRS: a multimodal extension of the General Linear Model using temporally embedded Canonical Correlation Analysis</article-title>. <source>NeuroImage</source> <volume>208</volume>:<fpage>116472</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2019.116472</pub-id><pub-id pub-id-type="pmid">31870944</pub-id></mixed-citation>
</ref>
<ref id="B60">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Magni</surname> <given-names>G.</given-names></name> <name><surname>Amadini Genovese</surname> <given-names>L.</given-names></name> <name><surname>Riva</surname> <given-names>G.</given-names></name> <name><surname>Repetto</surname> <given-names>C.</given-names></name></person-group> (<year>2025</year>). <article-title>Embodied metaphors and interpersonal synchrony in the digital age: the case of remote working</article-title>. <source>Front. Psychol.</source> <volume>16</volume>:<fpage>1648733</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2025.1648733</pub-id><pub-id pub-id-type="pmid">40823407</pub-id></mixed-citation>
</ref>
<ref id="B61">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mehta</surname> <given-names>R. K.</given-names></name> <name><surname>Parasuraman</surname> <given-names>R.</given-names></name></person-group> (<year>2013</year>). <article-title>Neuroergonomics: a review of applications to physical and cognitive work</article-title>. <source>Front. Hum. Neurosci.</source> <volume>7</volume>:<fpage>889</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnhum.2013.00889</pub-id><pub-id pub-id-type="pmid">24391575</pub-id></mixed-citation>
</ref>
<ref id="B62">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Melendez-Calderon</surname> <given-names>A.</given-names></name> <name><surname>Komisar</surname> <given-names>V.</given-names></name> <name><surname>Burdet</surname> <given-names>E.</given-names></name></person-group> (<year>2015</year>). <article-title>Interpersonal strategies for disturbance attenuation during a rhythmic joint motor action</article-title>. <source>Physiol. Behav.</source> <volume>147</volume>, <fpage>348</fpage>&#x02013;<lpage>358</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.physbeh.2015.04.046</pub-id><pub-id pub-id-type="pmid">25959343</pub-id></mixed-citation>
</ref>
<ref id="B63">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Montague</surname> <given-names>P. R.</given-names></name> <name><surname>Berns</surname> <given-names>G. S.</given-names></name> <name><surname>Cohen</surname> <given-names>J. D.</given-names></name> <name><surname>McClure</surname> <given-names>S. M.</given-names></name> <name><surname>Pagnoni</surname> <given-names>G.</given-names></name> <name><surname>Dhamala</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2002</year>). <article-title>Hyperscanning: simultaneous fMRI during linked social interactions</article-title>. <source>NeuroImage</source> <volume>16</volume>, <fpage>1159</fpage>&#x02013;<lpage>1164</lpage>. doi: <pub-id pub-id-type="doi">10.1006/nimg.2002.1150</pub-id><pub-id pub-id-type="pmid">12202103</pub-id></mixed-citation>
</ref>
<ref id="B64">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nam</surname> <given-names>C. S.</given-names></name> <name><surname>Choo</surname> <given-names>S.</given-names></name> <name><surname>Huang</surname> <given-names>J.</given-names></name> <name><surname>Park</surname> <given-names>J.</given-names></name></person-group> (<year>2020</year>). <article-title>Brain-to-brain neural synchrony during social interactions: a systematic review on hyperscanning studies</article-title>. <source>Appl. Sci.</source> <volume>10</volume>:<fpage>6669</fpage>. doi: <pub-id pub-id-type="doi">10.3390/app10196669</pub-id></mixed-citation>
</ref>
<ref id="B65">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Numata</surname> <given-names>T.</given-names></name> <name><surname>Kotani</surname> <given-names>K.</given-names></name> <name><surname>Sato</surname> <given-names>H.</given-names></name></person-group> (<year>2021</year>). <article-title>Relationship between subjective ratings of answers and behavioral and autonomic nervous activities during creative problem-solving via online conversation</article-title>. <source>Front. Neurosci.</source> <volume>15</volume>:<fpage>724679</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2021.724679</pub-id><pub-id pub-id-type="pmid">34671238</pub-id></mixed-citation>
</ref>
<ref id="B66">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nurmi</surname> <given-names>N.</given-names></name> <name><surname>Pakarinen</surname> <given-names>S.</given-names></name></person-group> (<year>2023</year>). <article-title>Virtual meeting fatigue: exploring the impact of virtual meetings on cognitive performance and active versus passive fatigue</article-title>. <source>J. Occup. Health Psychol.</source> <volume>28</volume>, <fpage>343</fpage>&#x02013;<lpage>362</lpage>. doi: <pub-id pub-id-type="doi">10.1037/ocp0000362</pub-id><pub-id pub-id-type="pmid">37883023</pub-id></mixed-citation>
</ref>
<ref id="B67">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Oh</surname> <given-names>C. S.</given-names></name> <name><surname>Bailenson</surname> <given-names>J. N.</given-names></name> <name><surname>Welch</surname> <given-names>G. F.</given-names></name></person-group> (<year>2018</year>). <article-title>A systematic review of social presence: definition, antecedents, and implications</article-title>. <source>Front. Robot. AI</source> <volume>5</volume>:<fpage>114</fpage>. doi: <pub-id pub-id-type="doi">10.3389/frobt.2018.00114</pub-id><pub-id pub-id-type="pmid">33500993</pub-id></mixed-citation>
</ref>
<ref id="B68">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ouzzani</surname> <given-names>M.</given-names></name> <name><surname>Hammady</surname> <given-names>H.</given-names></name> <name><surname>Fedorowicz</surname> <given-names>Z.</given-names></name> <name><surname>Elmagarmid</surname> <given-names>A.</given-names></name></person-group> (<year>2016</year>). <article-title>Rayyan-a web and mobile app for systematic reviews</article-title>. <source>Syst. Rev.</source> <volume>5</volume>:<fpage>210</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s13643-016-0384-4</pub-id><pub-id pub-id-type="pmid">27919275</pub-id></mixed-citation>
</ref>
<ref id="B69">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Page</surname> <given-names>M. J.</given-names></name> <name><surname>Moher</surname> <given-names>D.</given-names></name> <name><surname>Bossuyt</surname> <given-names>P. M.</given-names></name> <name><surname>Boutron</surname> <given-names>I.</given-names></name> <name><surname>Hoffmann</surname> <given-names>T. C.</given-names></name> <name><surname>Mulrow</surname> <given-names>C. D.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>PRISMA 2020 explanation and elaboration: updated guidance and exemplars for reporting systematic reviews</article-title>. <source>BMJ</source> <volume>372</volume>:<fpage>n160</fpage>. doi: <pub-id pub-id-type="doi">10.1136/bmj.n160</pub-id><pub-id pub-id-type="pmid">33781993</pub-id></mixed-citation>
</ref>
<ref id="B70">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pan</surname> <given-names>Y.</given-names></name> <name><surname>Cheng</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>Z.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Hu</surname> <given-names>Y.</given-names></name></person-group> (<year>2017</year>). <article-title>Cooperation in lovers: an fNIRS-based hyperscanning study</article-title>. <source>Hum. Brain Mapp.</source> <volume>38</volume>, <fpage>831</fpage>&#x02013;<lpage>841</lpage>. doi: <pub-id pub-id-type="doi">10.1002/hbm.23421</pub-id><pub-id pub-id-type="pmid">27699945</pub-id></mixed-citation>
</ref>
<ref id="B71">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Park</surname> <given-names>J.</given-names></name> <name><surname>Shin</surname> <given-names>J.</given-names></name> <name><surname>Lee</surname> <given-names>J.</given-names></name> <name><surname>Jeong</surname> <given-names>J.</given-names></name></person-group> (<year>2023</year>). <article-title>Inter-brain synchrony pattern investigation on triadic board game play-based social interaction: an fNIRS study</article-title>. <source>IEEE Trans. Neural Syst. Rehabil. Eng.</source> <volume>31</volume>, <fpage>2923</fpage>&#x02013;<lpage>2932</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TNSRE.2023.3292844</pub-id><pub-id pub-id-type="pmid">37410649</pub-id></mixed-citation>
</ref>
<ref id="B72">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pedregosa</surname> <given-names>F.</given-names></name> <name><surname>Varoquaux</surname> <given-names>G.</given-names></name> <name><surname>Gramfort</surname> <given-names>A.</given-names></name> <name><surname>Michel</surname> <given-names>V.</given-names></name> <name><surname>Thirion</surname> <given-names>B.</given-names></name> <name><surname>Grisel</surname> <given-names>O.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Scikit-learn: machine learning in Python</article-title>. <source>arXiv [preprint].</source> arXiv:1201.0490. doi: <pub-id pub-id-type="doi">10.48550/arXiv.1201.0490</pub-id></mixed-citation>
</ref>
<ref id="B73">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pinto-Orellana</surname> <given-names>M. A.</given-names></name> <name><surname>Khan</surname> <given-names>H.</given-names></name> <name><surname>Ombao</surname> <given-names>H.</given-names></name> <name><surname>Mirtaheri</surname> <given-names>P.</given-names></name></person-group> (<year>2024</year>). <article-title>Emerging neuroimaging approach of hybrid EEG-fNIRS recordings: data collection and analysis challenges</article-title>. <source>Data Sci. Sci</source>. <volume>3</volume>:<fpage>2426785</fpage>. doi: <pub-id pub-id-type="doi">10.1080/26941899.2024.2426785</pub-id></mixed-citation>
</ref>
<ref id="B74">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>P&#x000F6;ys&#x000E4;-Tarhonen</surname> <given-names>J.</given-names></name> <name><surname>Awwal</surname> <given-names>N.</given-names></name> <name><surname>H&#x000E4;kkinen</surname> <given-names>P.</given-names></name> <name><surname>Otieno</surname> <given-names>S.</given-names></name></person-group> (<year>2021</year>). <article-title>Joint attention behaviour in remote collaborative problem solving: exploring different attentional levels in dyadic interaction</article-title>. <source>RPTEL</source> <volume>16</volume>:<fpage>11</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s41039-021-00160-0</pub-id></mixed-citation>
</ref>
<ref id="B75">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Reinero</surname> <given-names>D. A.</given-names></name> <name><surname>Dikker</surname> <given-names>S.</given-names></name> <name><surname>van Bavel</surname> <given-names>J. J.</given-names></name></person-group> (<year>2021</year>). <article-title>Inter-brain synchrony in teams predicts collective performance</article-title>. <source>Soc. Cogn. Affect. Neurosci.</source> <volume>16</volume>, <fpage>43</fpage>&#x02013;<lpage>57</lpage>. doi: <pub-id pub-id-type="doi">10.1093/scan/nsaa135</pub-id><pub-id pub-id-type="pmid">32991728</pub-id></mixed-citation>
</ref>
<ref id="B76">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>R&#x000E9;veill&#x000E9;</surname> <given-names>C.</given-names></name> <name><surname>Vergotte</surname> <given-names>G.</given-names></name> <name><surname>Perrey</surname> <given-names>S.</given-names></name> <name><surname>Bosselut</surname> <given-names>G.</given-names></name></person-group> (<year>2024</year>). <article-title>Using interbrain synchrony to study teamwork: a systematic review and meta-analysis</article-title>. <source>Neurosci. Biobehav. Rev.</source> <volume>159</volume>:<fpage>105593</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neubiorev.2024.105593</pub-id><pub-id pub-id-type="pmid">38373643</pub-id></mixed-citation>
</ref>
<ref id="B77">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sarasso</surname> <given-names>P.</given-names></name> <name><surname>Ronga</surname> <given-names>I.</given-names></name> <name><surname>Del Fante</surname> <given-names>E.</given-names></name> <name><surname>Barbieri</surname> <given-names>P.</given-names></name> <name><surname>Lozzi</surname> <given-names>I.</given-names></name> <name><surname>Rosaia</surname> <given-names>N.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Physical but not virtual presence of others potentiates implicit and explicit learning</article-title>. <source>Sci. Rep.</source> <volume>12</volume>:<fpage>21205</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-022-25273-4</pub-id><pub-id pub-id-type="pmid">36481679</pub-id></mixed-citation>
</ref>
<ref id="B78">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sarasso</surname> <given-names>P.</given-names></name> <name><surname>Ronga</surname> <given-names>I.</given-names></name> <name><surname>Piovesan</surname> <given-names>F.</given-names></name> <name><surname>Barbieri</surname> <given-names>P.</given-names></name> <name><surname>Del Fante</surname> <given-names>E.</given-names></name> <name><surname>Luca</surname> <given-names>D.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Shared attention in virtual immersive reality enhances electrophysiological correlates of implicit sensory learning</article-title>. <source>Sci. Rep.</source> <volume>14</volume>:<fpage>3767</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-024-53937-w</pub-id><pub-id pub-id-type="pmid">38355691</pub-id></mixed-citation>
</ref>
<ref id="B79">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schneider</surname> <given-names>B.</given-names></name> <name><surname>Sung</surname> <given-names>G.</given-names></name> <name><surname>Chng</surname> <given-names>E.</given-names></name> <name><surname>Yang</surname> <given-names>S.</given-names></name></person-group> (<year>2021</year>). <article-title>How can high-frequency sensors capture collaboration? A review of the empirical links between multimodal metrics and collaborative constructs</article-title>. <source>Sensors</source> <volume>21</volume>:<fpage>8185</fpage>. doi: <pub-id pub-id-type="doi">10.3390/s21248185</pub-id><pub-id pub-id-type="pmid">34960278</pub-id></mixed-citation>
</ref>
<ref id="B80">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sharan</surname> <given-names>N. N.</given-names></name> <name><surname>Toet</surname> <given-names>A.</given-names></name> <name><surname>Mioch</surname> <given-names>T.</given-names></name> <name><surname>Niamut</surname> <given-names>O.</given-names></name> <name><surname>van Erp</surname> <given-names>J. B. F.</given-names></name></person-group> (<year>2022</year>). <article-title>&#x0201C;The relative importance of social cues in immersive mediated communication,&#x0201D;</article-title> in <source>Human Interaction, Emerging Technologies and Future Systems V: Proceedings of the 5th International Virtual Conference on Human Interaction and Emerging Technologies, IHIET 2021, August 27-29, 2021 and the 6th IHIET: Future Systems (IHIET-FS 2021), October 28&#x02013;30, 2021, France</source>, eds. T. Ahram and R. Taiar (Cham: Springer International Publishing; Imprint Springer), <fpage>491</fpage>&#x02013;<lpage>498</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-030-85540-6_62</pub-id></mixed-citation>
</ref>
<ref id="B81">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shih</surname> <given-names>Y.-T.</given-names></name> <name><surname>Wang</surname> <given-names>L.</given-names></name> <name><surname>Wong</surname> <given-names>C. H. Y.</given-names></name> <name><surname>Sin</surname> <given-names>E. L. L.</given-names></name> <name><surname>Rauterberg</surname> <given-names>M.</given-names></name> <name><surname>Yuan</surname> <given-names>Z.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>The effects of distancing design collaboration necessitated by COVID-19 on brain synchrony in teams compared to co-located design collaboration: a preliminary study</article-title>. <source>Brain Sci.</source> <volume>14</volume>:<fpage>60</fpage>. doi: <pub-id pub-id-type="doi">10.3390/brainsci14010060</pub-id><pub-id pub-id-type="pmid">38248275</pub-id></mixed-citation>
</ref>
<ref id="B82">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Snijdewint</surname> <given-names>J. A.</given-names></name> <name><surname>Scheepers</surname> <given-names>D.</given-names></name></person-group> (<year>2023</year>). <article-title>Group-based flow: the influence of cardiovascular synchronization and identifiability</article-title>. <source>Psychophysiology</source> <volume>60</volume>:<fpage>e14227</fpage>. doi: <pub-id pub-id-type="doi">10.1111/psyp.14227</pub-id><pub-id pub-id-type="pmid">36458474</pub-id></mixed-citation>
</ref>
<ref id="B83">
<mixed-citation publication-type="web"><person-group person-group-type="author"><name><surname>Solomon</surname> <given-names>D. H.</given-names></name> <name><surname>Theiss</surname> <given-names>J. A.</given-names></name></person-group> (<year>2022</year>). <source>Interpersonal Communication: Putting Theory into Practice, 2nd Edn.</source> (<publisher-loc>New York, NY; London</publisher-loc>: <publisher-name>Routledge; Taylor and Francis Group</publisher-name>). Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.taylorfrancis.com/books/mono/10.4324/9781351174381/interpersonal-communication-denise-solomon-jennifer-theiss">https://www.taylorfrancis.com/books/mono/10.4324/9781351174381/interpersonal-communication-denise-solomon-jennifer-theiss</ext-link> (Accessed September 1, 2025).</mixed-citation>
</ref>
<ref id="B84">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>&#x00160;pakov</surname> <given-names>O.</given-names></name> <name><surname>Istance</surname> <given-names>H.</given-names></name> <name><surname>R&#x000E4;ih&#x000E4;</surname> <given-names>K.-J.</given-names></name> <name><surname>Viitanen</surname> <given-names>T.</given-names></name> <name><surname>Siirtola</surname> <given-names>H.</given-names></name></person-group> (<year>2019</year>). <article-title>&#x0201C;Eye gaze and head gaze in collaborative games,&#x0201D;</article-title> in <source>Proceedings of the 11th ACM Symposium on Eye Tracking Research and Applications. Denver, CO, 25&#x02013;28 June 2019</source> (<publisher-loc>New York, NY</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name>), <fpage>1</fpage>&#x02013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.1145/3317959.3321489</pub-id></mixed-citation>
</ref>
<ref id="B85">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Strang</surname> <given-names>A. J.</given-names></name> <name><surname>Funke</surname> <given-names>G. J.</given-names></name> <name><surname>Russell</surname> <given-names>S. M.</given-names></name> <name><surname>Dukes</surname> <given-names>A. W.</given-names></name> <name><surname>Middendorf</surname> <given-names>M. S.</given-names></name></person-group> (<year>2014</year>). <article-title>Physio-behavioral coupling in a cooperative team task: contributors and relations</article-title>. <source>J. Exp. Psychol. Hum. Percept. Perform.</source> <volume>40</volume>, <fpage>145</fpage>&#x02013;<lpage>158</lpage>. doi: <pub-id pub-id-type="doi">10.1037/a0033125</pub-id><pub-id pub-id-type="pmid">23750969</pub-id></mixed-citation>
</ref>
<ref id="B86">
<mixed-citation publication-type="web"><collab>Streamlit Inc</collab>. (<year>2025</year>). <source>Streamlit: The Fastest Way to Build and Share Data Apps</source>. <collab>Streamlit Inc</collab>.Available online at: <ext-link ext-link-type="uri" xlink:href="https://streamlit.io">https://streamlit.io</ext-link> (Accessed January 29, 2026).</mixed-citation>
</ref>
<ref id="B87">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Stuldreher</surname> <given-names>I. V.</given-names></name> <name><surname>Thammasan</surname> <given-names>N.</given-names></name> <name><surname>van Erp</surname> <given-names>J. B. F.</given-names></name> <name><surname>Brouwer</surname> <given-names>A.-M.</given-names></name></person-group> (<year>2020</year>). <article-title>Physiological synchrony in EEG, electrodermal activity and heart rate detects attentionally relevant events in time</article-title>. <source>Front. Neurosci.</source> <volume>14</volume>:<fpage>575521</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2020.575521</pub-id><pub-id pub-id-type="pmid">33343277</pub-id></mixed-citation>
</ref>
<ref id="B88">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Szymanski</surname> <given-names>C.</given-names></name> <name><surname>Pesquita</surname> <given-names>A.</given-names></name> <name><surname>Brennan</surname> <given-names>A. A.</given-names></name> <name><surname>Perdikis</surname> <given-names>D.</given-names></name> <name><surname>Enns</surname> <given-names>J. T.</given-names></name> <name><surname>Brick</surname> <given-names>T. R.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Teams on the same wavelength perform better: inter-brain phase synchronization constitutes a neural substrate for social facilitation</article-title>. <source>NeuroImage</source> <volume>152</volume>, <fpage>425</fpage>&#x02013;<lpage>436</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2017.03.013</pub-id><pub-id pub-id-type="pmid">28284802</pub-id></mixed-citation>
</ref>
<ref id="B89">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Velletaz</surname> <given-names>T.</given-names></name> <name><surname>Janaqi</surname> <given-names>S.</given-names></name> <name><surname>Harispe</surname> <given-names>S.</given-names></name> <name><surname>Lagarde</surname> <given-names>J.</given-names></name> <name><surname>Guyot</surname> <given-names>P.</given-names></name></person-group> (<year>2025</year>). <article-title>A review of human synchronization datasets</article-title>. <source>IEEE Access</source> <volume>13</volume>, <fpage>67269</fpage>&#x02013;<lpage>67285</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2025.3560424</pub-id></mixed-citation>
</ref>
<ref id="B90">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>M.-Y.</given-names></name> <name><surname>Luan</surname> <given-names>P.</given-names></name> <name><surname>Zhang</surname> <given-names>J.</given-names></name> <name><surname>Xiang</surname> <given-names>Y.-T.</given-names></name> <name><surname>Niu</surname> <given-names>H.</given-names></name> <name><surname>Yuan</surname> <given-names>Z.</given-names></name></person-group> (<year>2018</year>). <article-title>Concurrent mapping of brain activation from multiple subjects during social interaction by hyperscanning: a mini-review</article-title>. <source>Quant. Imag. Med. Surg.</source> <volume>8</volume>, <fpage>819</fpage>&#x02013;<lpage>837</lpage>. doi: <pub-id pub-id-type="doi">10.21037/qims.2018.09.07</pub-id><pub-id pub-id-type="pmid">30306062</pub-id></mixed-citation>
</ref>
<ref id="B91">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>Z.</given-names></name> <name><surname>Ng</surname> <given-names>J. T. D.</given-names></name> <name><surname>Que</surname> <given-names>Y.</given-names></name> <name><surname>Hu</surname> <given-names>X.</given-names></name></person-group> (<year>2024</year>). <article-title>&#x0201C;Unveiling synchrony of learners&#x00027; multimodal data in collaborative maker activities,&#x0201D;</article-title> in <source>Proceedings of the 14th Learning Analytics and Knowledge Conference, Kyoto, Japan, 18&#x02013;22 March 2024</source> (<publisher-loc>Kyoto</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name>), <fpage>922</fpage>&#x02013;<lpage>928</lpage>. doi: <pub-id pub-id-type="doi">10.1145/3636555.3636935</pub-id></mixed-citation>
</ref>
<ref id="B92">
<mixed-citation publication-type="web"><person-group person-group-type="author"><name><surname>Wikstr&#x000F6;m</surname> <given-names>V.</given-names></name></person-group> (<year>2022</year>). <source>Intersubjectivity and Cooperation in Synchronous Computer-Mediated Interaction</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://helda.helsinki.fi/bitstreams/408ae468-880e-41c0-b757-708ab02d9dae/download">https://helda.helsinki.fi/bitstreams/408ae468-880e-41c0-b757-708ab02d9dae/download</ext-link> (Accessed September 1, 2025).</mixed-citation>
</ref>
<ref id="B93">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wikstr&#x000F6;m</surname> <given-names>V.</given-names></name> <name><surname>Falcon</surname> <given-names>M.</given-names></name> <name><surname>Martikainen</surname> <given-names>S.</given-names></name> <name><surname>Pejoska</surname> <given-names>J.</given-names></name> <name><surname>Durall</surname> <given-names>E.</given-names></name> <name><surname>Bauters</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Heart rate sharing at the workplace</article-title>. <source>Multimodal Technol. Interact.</source> <volume>5</volume>:<fpage>60</fpage>. doi: <pub-id pub-id-type="doi">10.3390/mti5100060</pub-id></mixed-citation>
</ref>
<ref id="B94">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wikstr&#x000F6;m</surname> <given-names>V.</given-names></name> <name><surname>Saarikivi</surname> <given-names>K.</given-names></name> <name><surname>Falcon</surname> <given-names>M.</given-names></name> <name><surname>Makkonen</surname> <given-names>T.</given-names></name> <name><surname>Martikainen</surname> <given-names>S.</given-names></name> <name><surname>Putkinen</surname> <given-names>V.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Inter-brain synchronization occurs without physical co-presence during cooperative online gaming</article-title>. <source>Neuropsychologia</source> <volume>174</volume>:<fpage>108316</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuropsychologia.2022.108316</pub-id><pub-id pub-id-type="pmid">35810882</pub-id></mixed-citation>
</ref>
<ref id="B95">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wisiecka</surname> <given-names>K.</given-names></name> <name><surname>Konishi</surname> <given-names>Y.</given-names></name> <name><surname>Krejtz</surname> <given-names>K.</given-names></name> <name><surname>Zolfaghari</surname> <given-names>M.</given-names></name> <name><surname>Kopainsky</surname> <given-names>B.</given-names></name> <name><surname>Krejtz</surname> <given-names>I.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Supporting complex decision-making: evidence from an eye tracking study on in-person and remote collaboration</article-title>. <source>ACM Trans. Comput. Hum. Interact.</source> <volume>30</volume>, <fpage>1</fpage>&#x02013;<lpage>27</lpage>. doi: <pub-id pub-id-type="doi">10.1145/3581787</pub-id></mixed-citation>
</ref>
<ref id="B96">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wood</surname> <given-names>D. J.</given-names></name> <name><surname>Gray</surname> <given-names>B.</given-names></name></person-group> (<year>1991</year>). <article-title>Toward a comprehensive theory of collaboration</article-title>. <source>J. Appl. Behav. Sci.</source> <volume>27</volume>, <fpage>139</fpage>&#x02013;<lpage>162</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0021886391272001</pub-id></mixed-citation>
</ref>
<ref id="B97">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname> <given-names>J.</given-names></name> <name><surname>Liu</surname> <given-names>Y.</given-names></name> <name><surname>Du</surname> <given-names>X.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Xue</surname> <given-names>C.</given-names></name></person-group> (<year>2025</year>). <article-title>Influence of design interaction modes on conceptual design behavior and inter-brain synchrony in designer teams: a fNIRS hyperscanning study</article-title>. <source>Adv. Eng. Inform.</source> <volume>65</volume>:<fpage>103223</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.aei.2025.103223</pub-id></mixed-citation>
</ref>
<ref id="B98">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname> <given-names>Y. J.</given-names></name> <name><surname>Antone</surname> <given-names>B.</given-names></name> <name><surname>DeChurch</surname> <given-names>L.</given-names></name> <name><surname>Contractor</surname> <given-names>N.</given-names></name></person-group> (<year>2023</year>). <article-title>Information sharing in a hybrid workplace: understanding the role of ease-of-use perceptions of communication technologies in advice-seeking relationship maintenance</article-title>. <source>J. Comput.-Mediat. Commun.</source> <volume>28</volume>:<fpage>zmad025</fpage>. doi: <pub-id pub-id-type="doi">10.1093/jcmc/zmad025</pub-id></mixed-citation>
</ref>
<ref id="B99">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yamaya</surname> <given-names>N.</given-names></name> <name><surname>Inagaki</surname> <given-names>H.</given-names></name> <name><surname>Shimizu</surname> <given-names>Y.</given-names></name> <name><surname>Mitsui</surname> <given-names>S.</given-names></name> <name><surname>Hirao</surname> <given-names>K.</given-names></name> <name><surname>Kikuchi</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Video communication mitigate feelings of friendliness: a functional near-infrared spectroscopy study</article-title>. <source>Neuroimage.</source> <volume>309</volume>:<fpage>121086</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2025.121086</pub-id><pub-id pub-id-type="pmid">39956411</pub-id></mixed-citation>
</ref>
<ref id="B100">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>Y.</given-names></name> <name><surname>Dwyer</surname> <given-names>T.</given-names></name> <name><surname>Swiecki</surname> <given-names>Z.</given-names></name> <name><surname>Lee</surname> <given-names>B.</given-names></name> <name><surname>Wybrow</surname> <given-names>M.</given-names></name> <name><surname>Cordeil</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Desktop versus VR for collaborative sensemaking</article-title>. <source>Front. Virtual Reality</source> <volume>6</volume>:<fpage>1570383</fpage>. doi: <pub-id pub-id-type="doi">10.3389/frvir.2025.1570383</pub-id></mixed-citation>
</ref>
<ref id="B101">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zamm</surname> <given-names>A.</given-names></name> <name><surname>Loehr</surname> <given-names>J. D.</given-names></name> <name><surname>Vesper</surname> <given-names>C.</given-names></name> <name><surname>Konvalinka</surname> <given-names>I.</given-names></name> <name><surname>Kappel</surname> <given-names>S. L.</given-names></name> <name><surname>Heggli</surname> <given-names>O. A.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>A practical guide to EEG hyperscanning in joint action research: from motivation to implementation</article-title>. <source>Soc. Cogn. Affect. Neurosci.</source> <volume>19</volume>:<fpage>nsae026</fpage>. doi: <pub-id pub-id-type="doi">10.1093/scan/nsae026</pub-id><pub-id pub-id-type="pmid">38584414</pub-id></mixed-citation>
</ref>
<ref id="B102">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>D.</given-names></name> <name><surname>Lin</surname> <given-names>Y.</given-names></name> <name><surname>Jing</surname> <given-names>Y.</given-names></name> <name><surname>Feng</surname> <given-names>C.</given-names></name> <name><surname>Gu</surname> <given-names>R.</given-names></name></person-group> (<year>2019</year>). <article-title>The dynamics of belief updating in human cooperation: findings from inter-brain ERP hyperscanning</article-title>. <source>NeuroImage</source> <volume>198</volume>, <fpage>1</fpage>&#x02013;<lpage>12</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2019.05.029</pub-id><pub-id pub-id-type="pmid">31085300</pub-id></mixed-citation>
</ref>
<ref id="B103">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>H.</given-names></name> <name><surname>Wang</surname> <given-names>H.</given-names></name> <name><surname>Long</surname> <given-names>Y.</given-names></name> <name><surname>Jiang</surname> <given-names>Y.</given-names></name> <name><surname>Lu</surname> <given-names>C.</given-names></name></person-group> (<year>2023a</year>). <article-title>Interpersonal neural synchronization underlies mnemonic similarity during collaborative remembering</article-title>. <source>Neuropsychologia</source> <volume>191</volume>:<fpage>108732</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuropsychologia.2023.108732</pub-id><pub-id pub-id-type="pmid">37951386</pub-id></mixed-citation>
</ref>
<ref id="B104">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>H.</given-names></name> <name><surname>Yang</surname> <given-names>J.</given-names></name> <name><surname>Ni</surname> <given-names>J.</given-names></name> <name><surname>de Dreu</surname> <given-names>C. K. W.</given-names></name> <name><surname>Ma</surname> <given-names>Y.</given-names></name></person-group> (<year>2023b</year>). <article-title>Leader-follower behavioural coordination and neural synchronization during intergroup conflict</article-title>. <source>Nat. Hum. Behav.</source> <volume>7</volume>, <fpage>2169</fpage>&#x02013;<lpage>2181</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41562-023-01663-0</pub-id><pub-id pub-id-type="pmid">37500783</pub-id></mixed-citation>
</ref>
<ref id="B105">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>J.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Leong</surname> <given-names>C.</given-names></name> <name><surname>Mao</surname> <given-names>Y.</given-names></name> <name><surname>Yuan</surname> <given-names>Z.</given-names></name></person-group> (<year>2024</year>). <article-title>Bridging stories and science: an fNIRS-based hyperscanning investigation into child learning in STEM</article-title>. <source>NeuroImage</source> <volume>285</volume>:<fpage>120486</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2023.120486</pub-id><pub-id pub-id-type="pmid">38070436</pub-id></mixed-citation>
</ref>
<ref id="B106">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>W.</given-names></name> <name><surname>Qiu</surname> <given-names>L.</given-names></name> <name><surname>Tang</surname> <given-names>F.</given-names></name> <name><surname>Sun</surname> <given-names>H.-J.</given-names></name></person-group> (<year>2023c</year>). <article-title>Gender differences in cognitive and affective interpersonal emotion regulation in couples: an fNIRS hyperscanning</article-title>. <source>Soc. Cogn. Affect. Neurosci.</source> 18. doi: <pub-id pub-id-type="doi">10.1093/scan/nsad057</pub-id></mixed-citation>
</ref>
<ref id="B107">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>N.</given-names></name> <name><surname>Zhang</surname> <given-names>X.</given-names></name> <name><surname>Noah</surname> <given-names>J. A.</given-names></name> <name><surname>Tiede</surname> <given-names>M.</given-names></name> <name><surname>Hirsch</surname> <given-names>J.</given-names></name></person-group> (<year>2023</year>). <article-title>Separable processes for live &#x0201C;in-person&#x0201D; and live &#x0201C;zoom-like&#x0201D; faces</article-title>. <source>Imag. Neurosci.</source> <volume>1</volume>, <fpage>1</fpage>&#x02013;<lpage>17</lpage>. doi: <pub-id pub-id-type="doi">10.1162/imag_a_00027</pub-id><pub-id pub-id-type="pmid">40799691</pub-id></mixed-citation>
</ref>
<ref id="B108">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhou</surname> <given-names>X.</given-names></name> <name><surname>Pan</surname> <given-names>Y.</given-names></name> <name><surname>Zhang</surname> <given-names>R.</given-names></name> <name><surname>Bei</surname> <given-names>L.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name></person-group> (<year>2021</year>). <article-title>Mortality threat mitigates interpersonal competition: an EEG-based hyperscanning study</article-title>. <source>Soc. Cogn. Affect. Neurosci.</source> <volume>16</volume>, <fpage>621</fpage>&#x02013;<lpage>631</lpage>. doi: <pub-id pub-id-type="doi">10.1093/scan/nsab033</pub-id><pub-id pub-id-type="pmid">33755182</pub-id></mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/240441/overview">Giacinto Barresi</ext-link>, University of the West of England, United Kingdom</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3309469/overview">Grace Qiyuan Miao</ext-link>, University of California, Los Angeles, United States</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3312926/overview">Pyatin Vasilii</ext-link>, Samara State Medical University, Russia</p>
</fn>
</fn-group>
<fn-group>
<fn fn-type="abbr" id="abbr1"><label>Abbreviations:</label><p>ECG, electrocardiography; EDA, electrodermal activity; EEG, electroencephalography; fMRI, functional magnetic resonance imaging; fNIRS, functional near-infrared spectroscopy; IBS, inter-brain synchrony; MEG, magnetoencephalography; PICOS, population, intervention, comparison, outcomes, study design; PRISMA, preferred reporting items for systematic reviews and meta-analyses; SVM, support vector machine.</p></fn></fn-group>
</back>
</article>