<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="brief-report">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Dev. Psychol.</journal-id>
<journal-title>Frontiers in Developmental Psychology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Dev. Psychol.</abbrev-journal-title>
<issn pub-type="epub">2813-7779</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fdpys.2024.1411276</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Developmental Psychology</subject>
<subj-group>
<subject>Brief Research Report</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Investigating the role of verbal cues on learning of tool-use actions in 18- and 24-month-olds in an online looking time experiment</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Trouillet</surname> <given-names>L&#x000E9;onie</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2705624/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Bothe</surname> <given-names>Ricarda</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2801388/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Mani</surname> <given-names>Nivedita</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/55468/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Elsner</surname> <given-names>Birgit</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/18315/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Developmental Psychology, University of Potsdam</institution>, <addr-line>Potsdam</addr-line>, <country>Germany</country></aff>
<aff id="aff2"><sup>2</sup><institution>Department for Psychology of Language, Georg-Elias-M&#x000FC;ller-Institut f&#x000FC;r Psychologie, Georg-August University Goettingen</institution>, <addr-line>Goettingen</addr-line>, <country>Germany</country></aff>
<aff id="aff3"><sup>3</sup><institution>Leibniz ScienceCampus &#x0201C;Primate Cognition&#x0201D;</institution>, <addr-line>Goettingen</addr-line>, <country>Germany</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Nayeli Gonzalez-Gomez, Oxford Brookes University, United Kingdom</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Yifei He, University of Marburg, Germany</p>
<p>Jane S. Herbert, University of Wollongong, Australia</p></fn>
<corresp id="c001">&#x0002A;Correspondence: L&#x000E9;onie Trouillet <email>leonie.trouillet&#x00040;uni-potsdam.de</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>02</day>
<month>08</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>2</volume>
<elocation-id>1411276</elocation-id>
<history>
<date date-type="received">
<day>02</day>
<month>04</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>17</day>
<month>07</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2024 Trouillet, Bothe, Mani and Elsner.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Trouillet, Bothe, Mani and Elsner</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract>
<p>This study was an unmoderated online experiment to investigate the impact of the semantic content of verbal cues on toddlers&#x00027; action learning. 18- and 24-month-olds (<italic>N</italic> = 89) watched videos of two tool-use actions accompanied by specific (&#x0201C;pressing in/pulling out&#x0201D;) or unspecific information (&#x0201C;doing that&#x0201D;). Learning was measured via looking times coded from webcam recordings. Regardless of age and verbal cue, toddlers looked equally long to test pictures of correct or incorrect tool-use, suggesting that meaningful verbal information did not improve the challenging video-based action learning. However, low drop-out rates and high webcam data quality confirm the feasibility of online experiments with toddlers.</p></abstract>
<kwd-group>
<kwd>tool-use</kwd>
<kwd>action learning</kwd>
<kwd>language</kwd>
<kwd>online study</kwd>
<kwd>development</kwd>
</kwd-group>
<contract-num rid="cn001">EL 253/7-2</contract-num>
<contract-sponsor id="cn001">Deutsche Forschungsgemeinschaft<named-content content-type="fundref-id">10.13039/501100001659</named-content></contract-sponsor>
<counts>
<fig-count count="2"/>
<table-count count="2"/>
<equation-count count="0"/>
<ref-count count="34"/>
<page-count count="7"/>
<word-count count="5702"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Development in Infancy</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>Introduction</title>
<p>Toddlers learn through observation of others. Although most everyday action learning takes place in live settings with face-to-face interaction, toddlers&#x00027; exposure to television significantly increased over the last decades (Rideout, <xref ref-type="bibr" rid="B25">2013</xref>), and screen time further rose during the COVID-19 pandemic and the associated lockdowns (Kahn et al., <xref ref-type="bibr" rid="B15">2021</xref>; Bergmann et al., <xref ref-type="bibr" rid="B6">2022</xref>). With the wide availability of screens and mobile devices, screen exposure will become even more prominent (Barr et al., <xref ref-type="bibr" rid="B3">2020</xref>), providing toddlers with opportunities but also challenges to learn from video. This study aimed to examine toddlers&#x00027; abilities to learn tool-use actions from video demonstrations in their naturalistic home setting, taking into consideration the potentially supportive influence of verbal information provided by the experimenter.</p>
<p>Research investigating action learning from video consistently points toward a video deficit effect (Barr, <xref ref-type="bibr" rid="B2">2010</xref>; Strouse and Samson, <xref ref-type="bibr" rid="B31">2021</xref>), that is, toddlers learn less from videos than live demonstrations. This effect is however mediated by communicative information. When action demonstrations were accompanied by pedagogical cues (i.e., the experimenter narrating the action while looking at the object or audience), there was no difference in 15- and 18-month-olds&#x00027; imitation between live and video demonstrations (Lauricella et al., <xref ref-type="bibr" rid="B17">2016</xref>). Furthermore, naturalistic descriptions of action steps derived from mothers&#x00027; narration style improved imitation from video demonstrations in 18-month-olds compared to empty speech (Seehagen and Herbert, <xref ref-type="bibr" rid="B28">2010</xref>). We aimed to extend this research to online learning of complex tool-use actions, where a learner must associate the functionally relevant properties of a tool with a specific movement to achieve an intended effect (Hernik and Csibra, <xref ref-type="bibr" rid="B13">2009</xref>).</p>
<p>The basis for the online study was a laboratory-based study (Trouillet et al., <xref ref-type="bibr" rid="B33">2024</xref>), where 18- and 24-month-olds observed live demonstrations of tool-use actions, accompanied by verbal information either labeling the tool with a pseudo-noun (Tanu/L&#x000F6;ki) and the action with a pseudo-verb (silling lupp/fapsing eel) or a meaningful verb (pressing in/pulling out; specific cue condition), or by empty speech (&#x0201C;With this, I doing that.&#x0201D;, unspecific cue condition). Toddlers performed more correct imitative actions when tools or actions were labeled than in the empty speech condition, with no difference between groups that heard the meaningful verbs and the pseudo-verbs. Thus, different labels for tools and actions&#x02013;and not the semantic content&#x02013;seemed to facilitate action learning, maybe by highlighting differences between the two tools or the two demonstrated actions.</p>
<p>In light of the growing availability and significance of digital content from a young age, we were interested if different labels for tools and actions would also help toddlers to overcome their difficulties in learning from video and to learn these more complex tool-use actions from videos watched at home. For this purpose, we adapted the tool-use actions from our imitation study (Trouillet et al., <xref ref-type="bibr" rid="B33">2024</xref>) to be suitable for video presentations on a small screen. In line with other online studies during the COVID-19 pandemic that examined toddlers&#x00027; development and behavior in their natural home settings (Tsuji et al., <xref ref-type="bibr" rid="B34">2022</xref>), we tested toddlers at home through an unmoderated online experiment and measured their action learning through looking times. This study expands the use of preferential looking time measurements in online studies, which have previously been used to capture visual preferences (Nelson and Oakes, <xref ref-type="bibr" rid="B21">2021</xref>), word recognition (Bacon et al., <xref ref-type="bibr" rid="B1">2021</xref>), and matching emotional utterances to corresponding pictures (Smith-Flores et al., <xref ref-type="bibr" rid="B29">2022</xref>), by applying them to action learning.</p>
<p>In the current study, 18- and 24-month-olds watched videos featuring an experimenter demonstrating two actions using different tools. To potentially enhance learning from video, the experimenter labeled the tool-action associations for one group of toddlers (specific cue) and provided verbal information that did not differentiate between the actions for the second group (unspecific cue). At test, toddlers were shown still-frames displaying correct and incorrect tool-use side-by-side (<xref ref-type="fig" rid="F1">Figure 1C</xref>). We assessed their action learning by analyzing webcam recordings of toddlers&#x00027; looking times at the pictures. Previous studies have shown that toddlers look longer at the part of a dual-ended tool that is incongruent with an actor&#x00027;s goal (N&#x000ED; Choisdealbha et al., <xref ref-type="bibr" rid="B20">2016</xref>) and that infants look longer at unexpected outcomes of a tool-use action (Hernik and Csibra, <xref ref-type="bibr" rid="B14">2015</xref>). Based on these findings, we took longer looking times to the incorrect than to the correct tool use pictures as an indicator of the toddlers&#x00027; action learning. Given that specific verbal information has been shown to benefit toddlers&#x00027; learning of tool-use actions from live demonstrations (Trouillet et al., <xref ref-type="bibr" rid="B33">2024</xref>), we expected to find a larger difference in looking times between the two pictures in the specific cue condition than in the unspecific cue condition. Furthermore, we expected this impact of the verbal cues to be more pronounced in 24-month-olds due to their advanced language development (see Gampe and Daum, <xref ref-type="bibr" rid="B12">2014</xref>).</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>Exemplary still frames of learning phase videos and test trial pictures. <bold>(A)</bold> Depicts the action with the pressing tool, <bold>(B)</bold> with the pulling tool. When the action is completed (i.e., the yellow circle reaches the opening on the right side), there is a blinking light and a sound effect. <bold>(C)</bold> Depicts two exemplary test trials, with a correct and incorrect tool-use picture presented side-by-side. Left: correct use of the pulling tool (right picture), incorrect use of the pressing tool. Right: Correct use of the pressing tool (right picture), incorrect use of the pulling tool. The identity of the correctly used tool and the position of the correct test picture were varied across four test trials per participant.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fdpys-02-1411276-g0001.tif"/>
</fig>
</sec>
<sec sec-type="materials and methods" id="s2">
<title>Materials and methods</title>
<p>We preregistered the methods and analysis at <ext-link ext-link-type="uri" xlink:href="https://osf.io/txqyz">https://osf.io/txqyz</ext-link>, and the data are openly available at <ext-link ext-link-type="uri" xlink:href="https://osf.io/7stb8/">https://osf.io/7stb8/</ext-link>. The final sample included 89 full-term German-speaking toddlers: forty-four 18-month-olds (<italic>M</italic> = 17.73 months, <italic>SD</italic> = 0.59, <italic>range</italic> = 17&#x02013;19, 21 girls) and forty-five 24-month-olds (<italic>M</italic> = 23.67 months, <italic>SD</italic> = 0.77, <italic>range</italic> = 22&#x02013;26; 27 girls). An equal number of toddlers per age group was randomly assigned to the two verbal cue conditions (<italic>n</italic> = 22; with one additional 24-month-old toddler in the unspecific verbal cue condition). We based the sample size on prior research that investigated the role of verbal information in action learning (<italic>n</italic> = 22&#x02013;26 children per group; Lauricella et al., <xref ref-type="bibr" rid="B17">2016</xref>; Patzwald and Elsner, <xref ref-type="bibr" rid="B23">2019</xref>), as well as on a similar, still ongoing imitation study conducted in our lab. Fourteen additional toddlers were excluded due to technical issues (<italic>n</italic> = 11; bad quality of the webcam recording, toddler not visible, black recording), parental interference (<italic>n</italic> = 1), missing age information (<italic>n</italic> = 1), and toddler&#x00027;s participation in a similar study in our lab (<italic>n</italic> = 1). Most families were recruited via phone call from databases from two German infant research labs, although a few families were recruited via a website with study links for infant and children online studies in Germany (KinderSchaffenWissen). 
Parents gave informed consent specific to online data collection, data protection and storage at the beginning of the experiment, and the study and those procedures were approved by the local ethics committee.</p>
<p>The experiment was conducted using LabVanced (Finger et al., <xref ref-type="bibr" rid="B10">2017</xref>) and families participated from home using their personal computer or laptop. For the webcam recordings, parents were instructed to place their toddler onto their lap and make sure that their toddler was visible in the recording. Toddlers first watched videos of a female experimenter demonstrating two tool-use actions on an effect box (<italic>learning phase</italic>, <xref ref-type="fig" rid="F1">Figures 1A</xref>, <xref ref-type="fig" rid="F1">B</xref>), followed by a <italic>test phase</italic> with four trials presenting two pictures side-by-side (<xref ref-type="fig" rid="F1">Figure 1C</xref>). A video with an exemplary walk-through of the experiment from a participant&#x00027;s point-of-view is available on <ext-link ext-link-type="uri" xlink:href="https://osf.io/7stb8/">https://osf.io/7stb8/</ext-link>. The effect box had one opening in which a pressing tool (colored stick, <xref ref-type="fig" rid="F1">Figure 1A</xref>) could be inserted; and one hook in which a pulling tool (differently colored stick with a loop, <xref ref-type="fig" rid="F1">Figure 1B</xref>) could be hooked. There were two sets of tools with switched colors (red and blue), and toddlers were randomly assigned to one of the sets. The videos of the learning phase started with a complete view of the experimenter providing a verbal cue for the first action (<xref ref-type="table" rid="T1">Table 1</xref>) for 9 s, followed by a close-up showing the respective action demonstration with the first tool (i.e., pressing or pulling; 4 s) ending with an action effect (appearance of a blinking star on a yellow circle together with a ringing sound). Then the experimenter provided a shortened version of the same verbal cue in full view (5 s), followed by another close-up action demonstration. This was repeated once, leading to a total duration of 31 s per video. 
After viewing an attention getter, toddlers watched the video presenting the second action with the other tool. Toddlers were randomly assigned to one of the orders of videos (pressing/pulling first). Toddlers were very attentive during the learning phase, with a mean looking time per video toward the screen of 30.84 s (99.5% of total duration of learning videos, <italic>SD</italic> = 1.23).</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Verbal cues during the learning phase.</p></caption>
<table frame="box" rules="all">
<tbody>
<tr>
<td valign="top" align="left">Specific verbal cue</td>
<td valign="top" align="left">&#x0201C;<italic>Look, this is the Tanu/L&#x000F6;ki. With the Tanu/L&#x000F6;ki, I want to press in/pull out</italic>. <italic><bold>Look, with the Tanu/L&#x000F6;ki I am pressing in/pulling out</bold></italic>.&#x0201D;</td>
</tr>
<tr>
<td valign="top" align="left">Unspecific verbal cue</td>
<td valign="top" align="left">&#x0201C;<italic>Look at this. With this, I want to do that</italic>. <italic><bold>Look, with this, I am doing that</bold></italic>.&#x0201D;</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>The cues are translated from German. The complete cue is given before the first action demonstration. The last sentence (bold) is repeated before the second and third demonstrations of the respective action. Toddlers hear the cues and watch three demonstrations of each of the two tool-use actions during the learning phase. Toddlers in the specific cue condition were randomly assigned to one of the two possible pairings of labels and tools (Tanu-pressing/L&#x000F6;ki-pulling or Tanu-pulling/L&#x000F6;ki-pressing).</p>
</table-wrap-foot>
</table-wrap>
<p>After the learning phase, toddlers were presented with four test trials (10 s each), each presenting two pictures side-by-side: one depicting one tool being used at the correct location of the effect box (= <italic>correct tool-use</italic>), the other depicting the other tool being used at the same (but for this tool incorrect) location (= <italic>incorrect tool-use</italic>, <xref ref-type="fig" rid="F1">Figure 1C</xref>). Toddlers were randomly assigned to one of four test trial set-ups that varied the order of picture sets and within these sets the identity of the correctly used tool and the left or right position of the correct tool-use picture (<xref ref-type="supplementary-material" rid="SM1">Supplementary Table 1</xref>). Each toddler saw the pressing and the pulling tool used incorrectly twice. Toddlers&#x00027; attention to the static pictures was quite high, with average looking times of 7.66 s (76.6%; <italic>SD</italic> = 1.09) to both pictures in each trial. After the test phase, parents were asked to indicate on a 5-point-Likert scale from 1 (very bad) to 5 (very good) how their child liked the participation in the experiment. On average, parents indicated that their child enjoyed participating (<italic>n</italic> = 86, <italic>M</italic> = 4.15, <italic>SD</italic> = 0.78). To ensure that toddlers understood the presented specific verbal cues, we also collected parental reports on their child&#x00027;s understanding of the German words <italic>ziehen, dr&#x000FC;cken, raus, rein</italic> (i.e., pulling, pressing, out, in; <xref ref-type="table" rid="T1">Table 1</xref>). Parents reported that children understood the majority of the presented words (18-month-olds: <italic>M</italic> = 3.41, <italic>SD</italic> = 0.85; 24-month-olds: <italic>M</italic> = 3.59, <italic>SD</italic> = 0.8).</p>
<p>One person coded toddlers&#x00027; looking behavior in the four test trials to the left or the right picture manually and frame-by-frame from the webcam recordings. A second coder re-coded the recordings of 25% of the toddlers, and interrater reliability was excellent (<italic>ICC</italic> = 0.93). Coders were unaware of the language condition under which the toddlers were tested. We needed to exclude only a few test trials with a disturbance (<italic>n</italic> = 4), in which toddlers moved a lot (<italic>n</italic> = 3), or looked at the screen for &#x0003C; 2 s (<italic>n</italic> = 4). This left <italic>n</italic> = 345 test trials (97%) for analysis. Quality of webcam recordings was thus very satisfactory. For each test trial, we calculated proportional target looking time by dividing the looking time to the picture showing the <italic>incorrect tool-use</italic> by the total looking time toward both pictures. Data were analyzed in <italic>R</italic> (version 4.2.2, R Core Team, <xref ref-type="bibr" rid="B24">2019</xref>) by means of a linear mixed effect model (lme4, version 1.1.29, Bates et al., <xref ref-type="bibr" rid="B5">2015</xref>) with <italic>proportional target looking time</italic> as dependent variable, verbal cue and age group (and their interaction) as fixed effects, and participant-ID as a random effect. We compared the full model with an intercept-only model. In addition, we averaged the proportional target looking times across the four test trials for each participant and tested the four experimental groups against 0.5 with one-sample <italic>t</italic>-tests (Bonferroni-Holm corrected), to determine whether toddlers&#x00027; looking behavior deviated from chance. All tests were two-tailed and alpha was set at <italic>p</italic> &#x0003C; 0.05. 
Given the null results obtained from our initial analyses, we calculated Bayes factors to provide additional evidence regarding the null hypothesis, using the <italic>BayesFactor</italic> package (version 0.9.12-4.7, Morey and Rouder, <xref ref-type="bibr" rid="B18">2024</xref>) with the default JZS prior. We originally planned (and pre-registered) to also analyze whether toddlers&#x00027; first look was directed toward the correct or incorrect tool-use picture. However, we realized that it was difficult for a participant to recognize whether the tools were used correctly or incorrectly on the test trial pictures through peripheral vision alone. We therefore excluded the analyses of the direction of the first look from our study, as we deemed it not reliable for measuring action learning.</p>
</sec>
<sec sec-type="results" id="s3">
<title>Results</title>
<p>We tested proportional looking time across the four test trials (<xref ref-type="fig" rid="F2">Figure 2</xref>) against 0.5 (i.e., chance) and applied Bonferroni-Holm correction to the <italic>p</italic>-values. Toddlers&#x00027; looking behavior in all groups did not differ from chance, 18 months specific cue: <italic>t</italic><sub>(21)</sub> = 0.22, <italic>p</italic><sub>adj</sub> = 1, <italic>BF</italic><sub>10</sub> = 0.23; 18 months unspecific cue: <italic>t</italic><sub>(21)</sub> = &#x02212;1.1, <italic>p</italic><sub>adj</sub> = 1, <italic>BF</italic><sub>10</sub> = 0.38; 24 months specific cue: <italic>t</italic><sub>(21)</sub> = 1.11, <italic>p</italic><sub>adj</sub> = 1, <italic>BF</italic><sub>10</sub> = 0.38; 24 months unspecific cue: <italic>t</italic><sub>(22)</sub> = &#x02212;0.1, <italic>p</italic><sub>adj</sub> = 1, <italic>BF</italic><sub>10</sub> = 0.22. This indicates that the toddlers looked for a similar amount of time to the pictures depicting incorrect and correct tool-use. Bayesian one-sample <italic>t</italic>-tests confirmed anecdotal to moderate evidence for the null hypothesis (chance level), with Bayes factors <italic>BF</italic><sub>10</sub> ranging from 0.22 to 0.38. Furthermore, the linear mixed model on proportional looking time during the test trials comprising age group, verbal cue, and their interaction (<xref ref-type="table" rid="T2">Table 2</xref>) did not show a significant improvement over the intercept-only model, &#x003C7;<sup>2</sup>(3) = 1.81, <italic>p</italic> = 0.61 (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Table 2</xref> for model comparison statistics). Finally, a Bayes factor analysis comparing the full model with the intercept-only model also indicated strong evidence for the latter, <italic>BF</italic><sub>10</sub> = 0.006.</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>Box-plots with individual data points for proportional target looking time toward the incorrect tool use picture, averaged across the four test trials. The larger black dot represents the group mean.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fdpys-02-1411276-g0002.tif"/>
</fig>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Model estimates for the proportional looking time.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th/>
<th valign="top" align="center"><bold>Estimate</bold></th>
<th valign="top" align="center"><bold>SE</bold></th>
<th valign="top" align="center"><bold><italic>df</italic></bold></th>
<th valign="top" align="center"><bold><italic>t</italic></bold></th>
<th valign="top" align="center"><bold><italic>p</italic></bold></th>
</tr>
</thead>
<tbody>
<tr style="background-color:#dee1e1">
<td valign="top" align="left" colspan="6"><bold>PLT</bold> &#x0007E;<bold>Verbal Cue</bold> <sup>&#x0002A;</sup><bold>Age Group</bold> &#x0002B; <bold>(1|ID)</bold></td>
</tr>
<tr>
<td valign="top" align="left">Intercept</td>
<td valign="top" align="center">0.502</td>
<td valign="top" align="center">0.009</td>
<td valign="top" align="center">341</td>
<td valign="top" align="center">54.045</td>
<td valign="top" align="center">&#x0003C; 0.001</td>
</tr>
<tr>
<td valign="top" align="left">Verbal cue</td>
<td valign="top" align="center">0.010</td>
<td valign="top" align="center">0.009</td>
<td valign="top" align="center">341</td>
<td valign="top" align="center">1.036</td>
<td valign="top" align="center">0.301</td>
</tr>
<tr>
<td valign="top" align="left">Age group</td>
<td valign="top" align="center">0.008</td>
<td valign="top" align="center">0.009</td>
<td valign="top" align="center">341</td>
<td valign="top" align="center">0.851</td>
<td valign="top" align="center">0.396</td>
</tr>
<tr>
<td valign="top" align="left">Verbal cue <sup>&#x0002A;</sup> age group</td>
<td valign="top" align="center">0.002</td>
<td valign="top" align="center">0.009</td>
<td valign="top" align="center">341</td>
<td valign="top" align="center">0.177</td>
<td valign="top" align="center">0.860</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>Contrasts for both age group and verbal cue were sum-coded.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec sec-type="discussion" id="s4">
<title>Discussion</title>
<p>Our analysis revealed that toddlers&#x00027; proportional looking times to the pictures depicting correct and incorrect tool-use during test did not vary between the 18- and 24-month-olds or between the groups that had heard specific or unspecific verbal cues during action demonstration. Furthermore, proportional looking times in all groups did not differ from chance, suggesting that the toddlers did not learn to associate the two tools with their respective functional actions. These results were corroborated by the calculation of Bayes factors, which also showed moderate to strong evidence for the null hypothesis. Learning actions from videos in a naturalistic home setting thus entails important challenges for toddlers, challenges which are not counteracted by the presentation of meaningful verbal cues.</p>
<p>Despite the consistent findings of a video deficit effect in screen-based action learning (Barr, <xref ref-type="bibr" rid="B2">2010</xref>; Strouse and Samson, <xref ref-type="bibr" rid="B31">2021</xref>), research has shown that toddlers can learn actions from videos under certain circumstances, for instance when demonstrations include pedagogical cues (Lauricella et al., <xref ref-type="bibr" rid="B17">2016</xref>) or naturalistic action descriptions (Seehagen and Herbert, <xref ref-type="bibr" rid="B28">2010</xref>). Furthermore, in a laboratory-based imitation study, 18- and 24-month-olds were able to learn and imitate similar pressing and pulling actions after observing a live demonstration, with more correct imitation when actions were presented with specific cues that labeled tools and actions (pulling out, pressing in) than with unspecific cues (doing that; Trouillet et al., <xref ref-type="bibr" rid="B33">2024</xref>). We had therefore expected that toddlers would be able to learn the tool-use actions from video demonstrations when the same specific verbal cues were provided. Taken together, the lack of learning in this online study suggests that while toddlers were able to learn to associate tools with their respective actions when observing a live demonstration, they were either unable to do so from videos in this unmoderated online study, or they did learn the association but our method of measuring learning&#x02013;through looking times at static test pictures&#x02013;was not effective.</p>
<p>Several factors could have impacted learning in this online experiment. First, online presentations diverge from laboratory-based video and live presentations in several key ways. For instance, testing occurs in the child&#x00027;s personal environment. On the one hand, this is an advantage because it is easier and more comfortable for families to participate when they do not have a specific appointment, making it more flexible. On the other hand, children participate in their familiar and potentially more stimulating environment, devoid of direct experimenter contact. While Seehagen and Herbert (<xref ref-type="bibr" rid="B28">2010</xref>) suggested that an experimenter&#x00027;s familiarity does not influence learning from videos, the personal interaction and atmosphere stemming from a lab or home visit by an experimenter might still enhance learning outcomes. Given these considerations, it would be interesting to determine if toddlers could learn these actions from interactive experiences in a moderated online experiment conducted via video chat. Here, the experimenter would engage with the toddler before the task and respond in a contingent manner to the toddler&#x00027;s behaviors and communication signals. Such interactivity has proven beneficial for learning words (Roseberry et al., <xref ref-type="bibr" rid="B26">2014</xref>; Myers et al., <xref ref-type="bibr" rid="B19">2017</xref>), discerning patterns (Myers et al., <xref ref-type="bibr" rid="B19">2017</xref>), locating hidden toys (Troseth et al., <xref ref-type="bibr" rid="B32">2018</xref>), and imitating actions after a brief delay (Nielsen et al., <xref ref-type="bibr" rid="B22">2008</xref>).</p>
<p>Second, in our live study (Trouillet et al., <xref ref-type="bibr" rid="B33">2024</xref>), toddlers had the opportunity to manually explore the tools before observing the action demonstrations. This hands-on experience may have enhanced children&#x00027;s processing of the demonstrations (as for example demonstrated for 10-month-olds&#x00027; understanding of others&#x00027; tool-use-actions; Sommerville et al., <xref ref-type="bibr" rid="B30">2008</xref>). Additionally, we maintained a consistent number of repetitions for the demonstrations in both the live and the online studies. It is possible that three instances per tool were not enough for toddlers to encode and learn the actions. Fittingly, research suggests that increasing the number of repetitions could mitigate the deficit effect for imitating from videos (e.g., Barr et al., <xref ref-type="bibr" rid="B4">2007</xref>). In sum, live demonstrations offer potential advantages for toddlers&#x00027; learning through interactivity between the experimenter and the child, as well as hands-on experiences, both of which are absent in online studies.</p>
<p>On a different note, toddlers might have learned the tool-action associations, but looking times toward static test pictures may not have been an adequate measure for their learning. While data gathered online often mirrors data collected in traditional lab settings (Scott et al., <xref ref-type="bibr" rid="B27">2017</xref>; Bacon et al., <xref ref-type="bibr" rid="B1">2021</xref>), there are instances where online studies failed to replicate in-lab results (Bochynska and Dillon, <xref ref-type="bibr" rid="B7">2021</xref>; Smith-Flores et al., <xref ref-type="bibr" rid="B29">2022</xref>). For instance, 7-month-olds did not discriminate shape changes in an unmoderated online study measuring proportional target looking times, which contradicted laboratory-based findings (Bochynska and Dillon, <xref ref-type="bibr" rid="B7">2021</xref>). Similarly, an online experiment that violated expectations about solidity of objects was unable to replicate common findings in 15- to 16-month-olds (Smith-Flores et al., <xref ref-type="bibr" rid="B29">2022</xref>). In both cases, replication failures could have resulted from the use of small personal screens, which might have limited the visibility of subtle shape changes (Bochynska and Dillon, <xref ref-type="bibr" rid="B7">2021</xref>) or distorted the visual angle of the stimuli (Smith-Flores et al., <xref ref-type="bibr" rid="B29">2022</xref>). It is possible that the visibility of critical tool features (specifically, the colored functional parts) was similarly compromised in our study. This could have prevented toddlers from perceiving differences between the tools, and in turn, hindered their ability to identify incorrect tool-use at test. Additionally, the static test pictures may not have been as effective as test videos in enabling toddlers to distinguish between correct and incorrect tool uses [as for example in Hernik and Csibra (<xref ref-type="bibr" rid="B14">2015</xref>)].</p>
<p>Specific verbal cues also did not impact toddlers&#x00027; learning, contradicting previous findings of a positive influence of specific verbal cues on imitative learning from live (Bonawitz et al., <xref ref-type="bibr" rid="B8">2010</xref>; Chen and Waxman, <xref ref-type="bibr" rid="B9">2013</xref>; Trouillet et al., <xref ref-type="bibr" rid="B33">2024</xref>) and video demonstrations (Seehagen and Herbert, <xref ref-type="bibr" rid="B28">2010</xref>; Lauricella et al., <xref ref-type="bibr" rid="B17">2016</xref>). While tool and action labels improved toddlers&#x00027; imitation of tool-use actions when presented in a live setting (Trouillet et al., <xref ref-type="bibr" rid="B33">2024</xref>), they may not have been as effective in facilitating action learning when presented in a video setting. Communicative cues can direct toddlers&#x00027; attention to certain aspects of a demonstration (e.g., Fukuyama and Myowa-Yamakoshi, <xref ref-type="bibr" rid="B11">2013</xref>) and labels can facilitate the perception of differences between objects (as found in object individuation studies; e.g., LaTourrette and Waxman, <xref ref-type="bibr" rid="B16">2020</xref>). However, if these aspects and differences are difficult to discern on a small screen, the cues may not be effective for toddlers&#x00027; action learning. In sum, neither specific nor unspecific verbal cues appeared to aid toddlers in learning the tool-use actions from videos at home. It therefore remains uncertain whether these cues had a neutral effect or perhaps impeded learning. To draw definitive conclusions, future research would need to incorporate a control condition without any verbal cues.</p>
<p>Although we did not find evidence of action learning in toddlers, our study supports the feasibility of conducting an online looking-time experiment with 18- and 24-month-olds. Only 14% of participants had to be excluded. For the remaining participants, the vast majority of webcam recordings in the test trials was of high quality and therefore straightforward to code, which was underscored by our excellent interrater-reliability. This also shows that parents understood and followed the webcam placement instructions displayed on screen. Most toddlers remained attentive throughout the experiment, and a majority of parents reported that their child enjoyed the experience. These observations support the viability of online experiments as a method for collecting webcam-based looking time data from toddlers at home [as seen in studies like Bacon et al. (<xref ref-type="bibr" rid="B1">2021</xref>) and Nelson and Oakes (<xref ref-type="bibr" rid="B21">2021</xref>)]. However, our null results raise the question of whether this method was suited to capture this particular area of cognitive development. So far, preferential looking times in online studies have successfully captured visual preferences in 4- to 12-month-olds for handled vs. non-handled objects (Nelson and Oakes, <xref ref-type="bibr" rid="B21">2021</xref>), familiar word recognition in 23- to 26-month-olds (Bacon et al., <xref ref-type="bibr" rid="B1">2021</xref>), and 16-month-olds&#x00027; matching of emotional utterances to corresponding pictures (Smith-Flores et al., <xref ref-type="bibr" rid="B29">2022</xref>). Yet, to our knowledge, our study has been the first to investigate toddlers&#x00027; action learning. Our research, therefore, raises awareness about the importance of carefully considering the suitability of online studies in that domain. Regarding recruitment, we found it most effective to contact parents personally over the phone to confirm their participation. 
Only a few families joined through online study advertisements, suggesting that while online experiments offer broader accessibility, personal contact remains crucial for participant recruitment.</p>
<p>In sum, this research demonstrated that online studies can be a feasible method for data collection with toddlers, providing looking time data through webcam recording and offline coding of satisfactory quality. However, this method may not be suited for all research questions. This study found no evidence that 18- and 24-month-olds transferred their observational experience with two tools and their respective functional actions to subsequently presented static pictures of correct or incorrect tool use. This finding suggests that live demonstrations provide specific aspects that enhance toddlers&#x00027; tool-use action learning (e.g., contingent social interaction, haptic exploration and handling of objects), and that online methods have methodological caveats when the presented stimuli have subtle differences that might not be easily distinguishable on personal screens. Especially for infants and toddlers, with their limited cognitive capacities, future research should continue to investigate factors that support learning, such as contingency of interaction via video chat. Such efforts acknowledge the fact that screens and mobile devices have become integral parts of toddlers&#x00027; learning environments.</p>
</sec>
<sec id="s5">
<title>Declaration of generative AI and AI-assisted technologies in the writing process</title>
<p>During the preparation of this work the author(s) used ChatGPT in order to check grammar and spelling and to improve the readability and language of their own writing. After using this tool/service, the author(s) reviewed and edited the content as needed and take(s) full responsibility for the content of the publication.</p>
</sec>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The datasets presented in this study can be found in online repositories. The names of the repository/repositories and accession number(s) can be found at: <ext-link ext-link-type="uri" xlink:href="https://osf.io/7stb8/">https://osf.io/7stb8/</ext-link>.</p>
</sec>
<sec sec-type="ethics-statement" id="s7">
<title>Ethics statement</title>
<p>The studies involving humans were approved by University of Potsdam Ethics Committee. The studies were conducted in accordance with the local legislation and institutional requirements. Written informed consent for participation in this study was provided by the participants&#x00027; legal guardians/next of kin. Written informed consent was obtained from the individual(s) for the publication of any identifiable images or data included in this article.</p>
</sec>
<sec sec-type="author-contributions" id="s8">
<title>Author contributions</title>
<p>LT: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Project administration, Software, Visualization, Writing &#x02013; original draft. RB: Conceptualization, Investigation, Methodology, Project administration, Writing &#x02013; review &#x00026; editing. NM: Conceptualization, Funding acquisition, Methodology, Resources, Supervision, Writing &#x02013; review &#x00026; editing. BE: Conceptualization, Funding acquisition, Methodology, Resources, Supervision, Writing &#x02013; review &#x00026; editing.</p>
</sec>
</body>
<back>
<sec sec-type="funding-information" id="s9">
<title>Funding</title>
<p>The author(s) declare financial support was received for the research, authorship, and/or publication of this article. This research was supported by Deutsche Forschungsgemeinschaft (DFG; Research Unit FOR 2253, grant no. EL 253/7-2 granted to BE).</p>
</sec>
<ack><p>We thank the families and their toddlers for participating in the study and Dr. Markus Studtmann for helping us to create the stimuli used in this study.</p>
</ack>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="s11">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fdpys.2024.1411276/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fdpys.2024.1411276/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Table_1.DOCX" id="SM1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document" xmlns:xlink="http://www.w3.org/1999/xlink"/></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bacon</surname> <given-names>D.</given-names></name> <name><surname>Weaver</surname> <given-names>H.</given-names></name> <name><surname>Saffran</surname> <given-names>J.</given-names></name></person-group> (<year>2021</year>). <article-title>A framework for online experimenter-moderated looking-time studies assessing infants&#x00027; linguistic knowledge</article-title>. <source>Front. Psychol.</source> <volume>12</volume>:<fpage>703839</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2021.703839</pub-id><pub-id pub-id-type="pmid">34630211</pub-id></citation></ref>
<ref id="B2">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Barr</surname> <given-names>R.</given-names></name></person-group> (<year>2010</year>). <article-title>Transfer of learning between 2D and 3D sources during infancy: informing theory and practice</article-title>. <source>Dev. Rev.</source> <volume>30</volume>, <fpage>128</fpage>&#x02013;<lpage>154</lpage>. <pub-id pub-id-type="doi">10.1016/j.dr.2010.03.001</pub-id><pub-id pub-id-type="pmid">20563302</pub-id></citation></ref>
<ref id="B3">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Barr</surname> <given-names>R.</given-names></name> <name><surname>Kirkorian</surname> <given-names>H.</given-names></name> <name><surname>Radesky</surname> <given-names>J.</given-names></name> <name><surname>Coyne</surname> <given-names>S.</given-names></name> <name><surname>Nichols</surname> <given-names>D.</given-names></name> <name><surname>Blanchfield</surname> <given-names>O.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Beyond screen time: a synergistic approach to a more comprehensive assessment of family media exposure during early childhood</article-title>. <source>Front. Psychol.</source> <volume>11</volume>:<fpage>1283</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2020.01283</pub-id><pub-id pub-id-type="pmid">32754078</pub-id></citation></ref>
<ref id="B4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Barr</surname> <given-names>R.</given-names></name> <name><surname>Muentener</surname> <given-names>P.</given-names></name> <name><surname>Garcia</surname> <given-names>A.</given-names></name> <name><surname>Fujimoto</surname> <given-names>M.</given-names></name> <name><surname>Ch&#x000E1;vez</surname> <given-names>V.</given-names></name></person-group> (<year>2007</year>). <article-title>The effect of repetition on imitation from television during infancy</article-title>. <source>Dev. Psychobiol.</source> <volume>49</volume>, <fpage>196</fpage>&#x02013;<lpage>207</lpage>. <pub-id pub-id-type="doi">10.1002/dev.20208</pub-id><pub-id pub-id-type="pmid">17299795</pub-id></citation></ref>
<ref id="B5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bates</surname> <given-names>D.</given-names></name> <name><surname>Maechler</surname> <given-names>M.</given-names></name> <name><surname>Bolker</surname> <given-names>B.</given-names></name> <name><surname>Walker</surname> <given-names>S.</given-names></name></person-group> (<year>2015</year>). <article-title>Fitting linear mixed-effects models using lme4</article-title>. <source>J. Stat. Softw.</source> <volume>67</volume>, <fpage>1</fpage>&#x02013;<lpage>48</lpage>. <pub-id pub-id-type="doi">10.18637/jss.v067.i01</pub-id></citation>
</ref>
<ref id="B6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bergmann</surname> <given-names>C.</given-names></name> <name><surname>Dimitrova</surname> <given-names>N.</given-names></name> <name><surname>Alaslani</surname> <given-names>K.</given-names></name> <name><surname>Almohammadi</surname> <given-names>A.</given-names></name> <name><surname>Alroqi</surname> <given-names>H.</given-names></name> <name><surname>Aussems</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Young children&#x00027;s screen time during the first COVID-19 lockdown in 12 countries</article-title>. <source>Sci. Rep.</source> <volume>12</volume>, <fpage>2015</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-022-05840-5</pub-id><pub-id pub-id-type="pmid">35132065</pub-id></citation></ref>
<ref id="B7">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bochynska</surname> <given-names>A.</given-names></name> <name><surname>Dillon</surname> <given-names>M. R.</given-names></name></person-group> (<year>2021</year>). <article-title>Bringing home baby Euclid: testing infants&#x00027; basic shape discrimination online</article-title>. <source>Front. Psychol.</source> <volume>12</volume>:<fpage>734592</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2021.734592</pub-id><pub-id pub-id-type="pmid">35002837</pub-id></citation></ref>
<ref id="B8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bonawitz</surname> <given-names>E. B.</given-names></name> <name><surname>Ferranti</surname> <given-names>D.</given-names></name> <name><surname>Saxe</surname> <given-names>R.</given-names></name> <name><surname>Gopnik</surname> <given-names>A.</given-names></name> <name><surname>Meltzoff</surname> <given-names>A. N.</given-names></name> <name><surname>Woodward</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2010</year>). <article-title>Just do it? Investigating the gap between prediction and action in toddlers&#x00027; causal inferences</article-title>. <source>Cognition</source> <volume>115</volume>, <fpage>104</fpage>&#x02013;<lpage>117</lpage>. <pub-id pub-id-type="doi">10.1016/j.cognition.2009.12.001</pub-id><pub-id pub-id-type="pmid">20097329</pub-id></citation></ref>
<ref id="B9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>M. L.</given-names></name> <name><surname>Waxman</surname> <given-names>S. R.</given-names></name></person-group> (<year>2013</year>). <article-title>&#x0201C;Shall we blick?&#x0201D; Novel words highlight actors&#x00027; underlying intentions for 14-month-old infants</article-title>. <source>Dev. Psychol.</source> <volume>49</volume>, <fpage>426</fpage>&#x02013;<lpage>431</lpage>. <pub-id pub-id-type="doi">10.1037/a0029486</pub-id><pub-id pub-id-type="pmid">22822935</pub-id></citation></ref>
<ref id="B10">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Finger</surname> <given-names>H.</given-names></name> <name><surname>Goeke</surname> <given-names>C.</given-names></name> <name><surname>Diekamp</surname> <given-names>D.</given-names></name> <name><surname>Standvo&#x000DF;</surname> <given-names>K.</given-names></name> <name><surname>K&#x000F6;nig</surname> <given-names>P.</given-names></name></person-group> (<year>2017</year>). <article-title>&#x0201C;LabVanced: a unified JavaScript framework for online studies,&#x0201D;</article-title> in <source>International Conference on Computational Social Science (Cologne).</source> Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.labvanced.com/publication.html">https://www.labvanced.com/publication.html</ext-link> (accessed July 25, 2024).<pub-id pub-id-type="pmid">35754522</pub-id></citation></ref>
<ref id="B11">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fukuyama</surname> <given-names>H.</given-names></name> <name><surname>Myowa-Yamakoshi</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Fourteen-month-old infants copy an action style accompanied by social-emotional cues</article-title>. <source>Infant Behav. Dev.</source> <volume>36</volume>, <fpage>609</fpage>&#x02013;<lpage>617</lpage>. <pub-id pub-id-type="doi">10.1016/j.infbeh.2013.06.005</pub-id><pub-id pub-id-type="pmid">23872481</pub-id></citation></ref>
<ref id="B12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gampe</surname> <given-names>A.</given-names></name> <name><surname>Daum</surname> <given-names>M. M.</given-names></name></person-group> (<year>2014</year>). <article-title>Productive verbs facilitate action prediction in toddlers</article-title>. <source>Infancy</source> <volume>19</volume>, <fpage>301</fpage>&#x02013;<lpage>325</lpage>. <pub-id pub-id-type="doi">10.1111/infa.12047</pub-id></citation>
</ref>
<ref id="B13">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hernik</surname> <given-names>M.</given-names></name> <name><surname>Csibra</surname> <given-names>G.</given-names></name></person-group> (<year>2009</year>). <article-title>Functional understanding facilitates learning about tools in human children</article-title>. <source>Curr. Opin. Neurobiol.</source> <volume>19</volume>, <fpage>34</fpage>&#x02013;<lpage>38</lpage>. <pub-id pub-id-type="doi">10.1016/j.conb.2009.05.003</pub-id><pub-id pub-id-type="pmid">19477630</pub-id></citation></ref>
<ref id="B14">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hernik</surname> <given-names>M.</given-names></name> <name><surname>Csibra</surname> <given-names>G.</given-names></name></person-group> (<year>2015</year>). <article-title>Infants learn enduring functions of novel tools from action demonstrations</article-title>. <source>J. Exp. Child Psychol.</source> <volume>130</volume>, <fpage>176</fpage>&#x02013;<lpage>192</lpage>. <pub-id pub-id-type="doi">10.1016/j.jecp.2014.10.004</pub-id><pub-id pub-id-type="pmid">25462040</pub-id></citation></ref>
<ref id="B15">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kahn</surname> <given-names>M.</given-names></name> <name><surname>Barnett</surname> <given-names>N.</given-names></name> <name><surname>Glazer</surname> <given-names>A.</given-names></name> <name><surname>Gradisar</surname> <given-names>M.</given-names></name></person-group> (<year>2021</year>). <article-title>Covid-19 babies: auto-videosomnography and parent reports of infant sleep, screen time, and parent well-being in 2019 vs 2020</article-title>. <source>Sleep Med.</source> <volume>85</volume>, <fpage>259</fpage>&#x02013;<lpage>267</lpage>. <pub-id pub-id-type="doi">10.1016/j.sleep.2021.07.033</pub-id><pub-id pub-id-type="pmid">34388504</pub-id></citation></ref>
<ref id="B16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>LaTourrette</surname> <given-names>A. S.</given-names></name> <name><surname>Waxman</surname> <given-names>S. R.</given-names></name></person-group> (<year>2020</year>). <article-title>Naming guides how 12-month-old infants encode and remember objects</article-title>. <source>Proc. Natl. Acad. Sci. USA.</source> <volume>117</volume>, <fpage>21230</fpage>&#x02013;<lpage>21234</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.2006608117</pub-id><pub-id pub-id-type="pmid">32817508</pub-id></citation></ref>
<ref id="B17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lauricella</surname> <given-names>A. R.</given-names></name> <name><surname>Barr</surname> <given-names>R.</given-names></name> <name><surname>Calvert</surname> <given-names>S. L.</given-names></name></person-group> (<year>2016</year>). <article-title>Toddler learning from video: effect of matched pedagogical cues</article-title>. <source>Infant Behav. Dev.</source> <volume>45</volume>, <fpage>22</fpage>&#x02013;<lpage>30</lpage>. <pub-id pub-id-type="doi">10.1016/j.infbeh.2016.08.001</pub-id><pub-id pub-id-type="pmid">27591487</pub-id></citation></ref>
<ref id="B18">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Morey</surname> <given-names>R.</given-names></name> <name><surname>Rouder</surname> <given-names>J.</given-names></name></person-group> (<year>2024</year>). <source>BayesFactor: Computation of Bayes Factors for Common Designs</source>. [Computer software]. Available online at: <ext-link ext-link-type="uri" xlink:href="https://CRAN.R-project.org/package=BayesFactor">https://CRAN.R-project.org/package=BayesFactor</ext-link> (accessed July 25, 2024).</citation>
</ref>
<ref id="B19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Myers</surname> <given-names>L. J.</given-names></name> <name><surname>LeWitt</surname> <given-names>R. B.</given-names></name> <name><surname>Gallo</surname> <given-names>R. E.</given-names></name> <name><surname>Maselli</surname> <given-names>N. M.</given-names></name></person-group> (<year>2017</year>). <article-title>Baby FaceTime: can toddlers learn from online video chat?</article-title> <source>Dev. Sci.</source> <volume>20</volume>:<fpage>e12430</fpage>. <pub-id pub-id-type="doi">10.1111/desc.12430</pub-id><pub-id pub-id-type="pmid">27417537</pub-id></citation></ref>
<ref id="B20">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>N&#x000ED; Choisdealbha</surname> <given-names>A.</given-names></name> <name><surname>Westermann</surname> <given-names>G.</given-names></name> <name><surname>Dunn</surname> <given-names>K.</given-names></name> <name><surname>Reid</surname> <given-names>V.</given-names></name></person-group> (<year>2016</year>). <article-title>Dissociating associative and motor aspects of action understanding: processing of dual-ended tools by 16-month-old infants</article-title>. <source>Br. J. Dev. Psychol.</source> <volume>34</volume>, <fpage>115</fpage>&#x02013;<lpage>131</lpage>. <pub-id pub-id-type="doi">10.1111/bjdp.12116</pub-id><pub-id pub-id-type="pmid">26414113</pub-id></citation></ref>
<ref id="B21">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nelson</surname> <given-names>C. M.</given-names></name> <name><surname>Oakes</surname> <given-names>L. M.</given-names></name></person-group> (<year>2021</year>). <article-title>&#x0201C;May I Grab Your Attention?&#x0201D;: an investigation into infants&#x00027; visual preferences for handled objects using Lookit as an online platform for data collection</article-title>. <source>Front. Psychol.</source> <volume>12</volume>:<fpage>733218</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2021.733218</pub-id><pub-id pub-id-type="pmid">34566820</pub-id></citation></ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nielsen</surname> <given-names>M.</given-names></name> <name><surname>Simcock</surname> <given-names>G.</given-names></name> <name><surname>Jenkins</surname> <given-names>L.</given-names></name></person-group> (<year>2008</year>). <article-title>The effect of social engagement on 24-month-olds&#x00027; imitation from live and televised models</article-title>. <source>Dev. Sci.</source> <volume>11</volume>, <fpage>722</fpage>&#x02013;<lpage>731</lpage>. <pub-id pub-id-type="doi">10.1111/j.1467-7687.2008.00722.x</pub-id><pub-id pub-id-type="pmid">18801128</pub-id></citation></ref>
<ref id="B23">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Patzwald</surname> <given-names>C.</given-names></name> <name><surname>Elsner</surname> <given-names>B.</given-names></name></person-group> (<year>2019</year>). <article-title>Do as I say - or as I do?! How 18- and 24-month-olds integrate words and actions to infer intentions in situations of match or mismatch</article-title>. <source>Infant Behav. Dev.</source> <volume>55</volume>, <fpage>46</fpage>&#x02013;<lpage>57</lpage>. <pub-id pub-id-type="doi">10.1016/j.infbeh.2019.03.004</pub-id><pub-id pub-id-type="pmid">30921544</pub-id></citation></ref>
<ref id="B24">
<citation citation-type="web"><person-group person-group-type="author"><collab>R Core Team</collab></person-group> (<year>2019</year>). <source>R: A Language and Environment for Statistical Computing</source> [Computer software]. <publisher-loc>Vienna</publisher-loc>: <publisher-name>R Foundation for Statistical Computing</publisher-name>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.R-project.org/">https://www.R-project.org/</ext-link> (accessed July 25, 2024).</citation>
</ref>
<ref id="B25">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Rideout</surname> <given-names>V.</given-names></name></person-group> (<year>2013</year>). <source>Zero to Eight: Children&#x00027;s Media Use in America.</source> <publisher-loc>San Francisco, CA</publisher-loc>: <publisher-name>Common Sense Media</publisher-name>.<pub-id pub-id-type="pmid">37368873</pub-id></citation></ref>
<ref id="B26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Roseberry</surname> <given-names>S.</given-names></name> <name><surname>Hirsh-Pasek</surname> <given-names>K.</given-names></name> <name><surname>Golinkoff</surname> <given-names>R. M.</given-names></name></person-group> (<year>2014</year>). <article-title>Skype me! Socially contingent interactions help toddlers learn language</article-title>. <source>Child Dev.</source> <volume>85</volume>, <fpage>956</fpage>&#x02013;<lpage>970</lpage>. <pub-id pub-id-type="doi">10.1111/cdev.12166</pub-id><pub-id pub-id-type="pmid">24112079</pub-id></citation></ref>
<ref id="B27">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Scott</surname> <given-names>K.</given-names></name> <name><surname>Chu</surname> <given-names>J.</given-names></name> <name><surname>Schulz</surname> <given-names>L.</given-names></name></person-group> (<year>2017</year>). <article-title>Lookit (Part 2): assessing the viability of online developmental research, results from three case studies</article-title>. <source>Open Mind</source> <volume>1</volume>, <fpage>15</fpage>&#x02013;<lpage>29</lpage>. <pub-id pub-id-type="doi">10.1162/OPMI_a_00001</pub-id></citation>
</ref>
<ref id="B28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Seehagen</surname> <given-names>S.</given-names></name> <name><surname>Herbert</surname> <given-names>J. S.</given-names></name></person-group> (<year>2010</year>). <article-title>The role of demonstrator familiarity and language cues on infant imitation from television</article-title>. <source>Infant Behav. Dev.</source> <volume>33</volume>, <fpage>168</fpage>&#x02013;<lpage>175</lpage>. <pub-id pub-id-type="doi">10.1016/j.infbeh.2009.12.008</pub-id><pub-id pub-id-type="pmid">20137815</pub-id></citation></ref>
<ref id="B29">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Smith-Flores</surname> <given-names>A. S.</given-names></name> <name><surname>Perez</surname> <given-names>J.</given-names></name> <name><surname>Zhang</surname> <given-names>M. H.</given-names></name> <name><surname>Feigenson</surname> <given-names>L.</given-names></name></person-group> (<year>2022</year>). <article-title>Online measures of looking and learning in infancy</article-title>. <source>Infancy</source> <volume>27</volume>, <fpage>4</fpage>&#x02013;<lpage>24</lpage>. <pub-id pub-id-type="doi">10.1111/infa.12435</pub-id><pub-id pub-id-type="pmid">34524727</pub-id></citation></ref>
<ref id="B30">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sommerville</surname> <given-names>J. A.</given-names></name> <name><surname>Hildebrand</surname> <given-names>E. A.</given-names></name> <name><surname>Crane</surname> <given-names>C. C.</given-names></name></person-group> (<year>2008</year>). <article-title>Experience matters: the impact of doing versus watching on infants&#x00027; subsequent perception of tool-use events</article-title>. <source>Dev. Psychol.</source> <volume>44</volume>, <fpage>1249</fpage>&#x02013;<lpage>1256</lpage>. <pub-id pub-id-type="doi">10.1037/a0012296</pub-id><pub-id pub-id-type="pmid">18793059</pub-id></citation></ref>
<ref id="B31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Strouse</surname> <given-names>G. A.</given-names></name> <name><surname>Samson</surname> <given-names>J. E.</given-names></name></person-group> (<year>2021</year>). <article-title>Learning from video: a meta-analysis of the video deficit in children ages 0 to 6 years</article-title>. <source>Child Dev.</source> <volume>92</volume>, <fpage>e20</fpage>&#x02013;<lpage>e38</lpage>. <pub-id pub-id-type="doi">10.1111/cdev.13429</pub-id><pub-id pub-id-type="pmid">33491209</pub-id></citation></ref>
<ref id="B32">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Troseth</surname> <given-names>G. L.</given-names></name> <name><surname>Strouse</surname> <given-names>G. A.</given-names></name> <name><surname>Verdine</surname> <given-names>B. N.</given-names></name> <name><surname>Saylor</surname> <given-names>M. M.</given-names></name></person-group> (<year>2018</year>). <article-title>Let&#x00027;s chat: on-screen social responsiveness is not sufficient to support toddlers&#x00027; word learning from video</article-title>. <source>Front. Psychol.</source> <volume>9</volume>:<fpage>2195</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2018.02195</pub-id><pub-id pub-id-type="pmid">30483198</pub-id></citation></ref>
<ref id="B33">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Trouillet</surname> <given-names>L.</given-names></name> <name><surname>Bothe</surname> <given-names>R.</given-names></name> <name><surname>Mani</surname> <given-names>N.</given-names></name> <name><surname>Elsner</surname> <given-names>B.</given-names></name></person-group> (<year>2024</year>). <article-title>Distinctive verbal cues support the learning of tool-use actions in 18- and 24-month-olds</article-title>. <source>PsyArXiv</source> [Preprint]. Available online at: <ext-link ext-link-type="uri" xlink:href="https://osf.io/preprints/psyarxiv/4m9da">osf.io/preprints/psyarxiv/4m9da</ext-link> (accessed April 2, 2024).</citation>
</ref>
<ref id="B34">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tsuji</surname> <given-names>S.</given-names></name> <name><surname>Amso</surname> <given-names>D.</given-names></name> <name><surname>Cusack</surname> <given-names>R.</given-names></name> <name><surname>Kirkham</surname> <given-names>N.</given-names></name> <name><surname>Oakes</surname> <given-names>L. M.</given-names></name></person-group> (<year>2022</year>). <article-title>Editorial: Empirical research at a distance: new methods for developmental science</article-title>. <source>Front. Psychol.</source> <volume>13</volume>:<fpage>938995</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyg.2022.938995</pub-id><pub-id pub-id-type="pmid">35693494</pub-id></citation></ref>
</ref-list>
</back>
</article>