<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Psychiatry</journal-id>
<journal-title-group>
<journal-title>Frontiers in Psychiatry</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Psychiatry</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1664-0640</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpsyt.2026.1769322</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Leveraging point-of-view camera and MediaPipe for objective hyperactivity assessment in preschool ADHD</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Kay&#x131;&#x15f;</surname><given-names>Hakan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>*</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3234220/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Gedizlio&#x11f;lu</surname><given-names>&#xc7;&#x131;nar</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3257837/overview"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &amp; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Department of Child and Adolescent Psychiatry, Faculty of Medicine, Zonguldak B&#xfc;lent Ecevit University</institution>, <city>Zonguldak</city>,&#xa0;<country country="TR">T&#xfc;rkiye</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Computer Engineering, &#x130;zmir University of Economics</institution>, <city>&#x130;zmir</city>,&#xa0;<country country="TR">T&#xfc;rkiye</country></aff>
<author-notes>
<corresp id="c001"><label>*</label>Correspondence: Hakan Kay&#x131;&#x15f;, <email xlink:href="mailto:drhakankayis@gmail.com">drhakankayis@gmail.com</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-03-04">
<day>04</day>
<month>03</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>17</volume>
<elocation-id>1769322</elocation-id>
<history>
<date date-type="received">
<day>16</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>16</day>
<month>02</month>
<year>2026</year>
</date>
<date date-type="rev-recd">
<day>09</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Kay&#x131;&#x15f; and Gedizlio&#x11f;lu.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Kay&#x131;&#x15f; and Gedizlio&#x11f;lu</copyright-holder>
<license>
<ali:license_ref start_date="2026-03-04">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Background</title>
<p>Attention-Deficit/Hyperactivity Disorder (ADHD) often emerges in early childhood, with hyperactivity and impulsivity constituting the most prominent symptoms during the preschool period. Current assessment approaches rely largely on clinical interviews and behavior rating scales, which are susceptible to subjectivity and contextual bias. Objective, ecologically valid, and low-burden methods for quantifying hyperactivity in preschool settings remain limited.</p>
</sec>
<sec>
<title>Methods</title>
<p>This observational, cross-sectional study investigated whether movement-based features extracted from teacher-worn point-of-view (POV) video recordings could differentiate preschool children at risk for ADHD-related hyperactivity from non-hyperactive peers. Fifty-one preschool children (48&#x2013;60 months) participated in a standardized, three-minute storytelling interaction conducted in a familiar classroom environment. Video recordings were processed using MediaPipe pose estimation to derive region-specific movement indices across multiple body segments. Group differences were examined using statistical analyses. In addition, supervised machine learning models were applied to evaluate classification performance based on movement features.</p>
</sec>
<sec>
<title>Results</title>
<p>Children in the hyperactivity-risk group exhibited significantly greater movement across several body regions, particularly distal upper- and lower-limb segments, compared to non-hyperactive peers. Machine learning analyses indicated promising classification performance, with the support vector machine achieving an accuracy of 84.31%, sensitivity of 80.0%, specificity of 87.10%, and an area under the receiver operating characteristic curve (AUC) of 0.83. Permutation-based feature importance analyses highlighted distal limb movements as the most informative features for classification.</p>
</sec>
<sec>
<title>Conclusions</title>
<p>These findings suggest that POV-based, vision-driven assessment provides a promising, objective, and ecologically valid approach for quantifying hyperactivity-related motor behavior in preschool children. While not intended as a standalone diagnostic tool, this low-burden framework may serve as a valuable complement to existing screening practices and support early identification efforts in educational settings.</p>
</sec>
</abstract>
<kwd-group>
<kwd>ADHD</kwd>
<kwd>digital phenotyping</kwd>
<kwd>early screening</kwd>
<kwd>ecological validity</kwd>
<kwd>hyperactivity</kwd>
<kwd>machine learning</kwd>
<kwd>point-of-view video</kwd>
<kwd>pose estimation</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement>
</funding-group>
<counts>
<fig-count count="6"/>
<table-count count="4"/>
<equation-count count="0"/>
<ref-count count="66"/>
<page-count count="16"/>
<word-count count="8855"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Digital Mental Health</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro">
<title>Introduction</title>
<p>Attention-Deficit/Hyperactivity Disorder (ADHD) is a neurodevelopmental condition characterized by inattention, hyperactivity, and impulsivity, with onset typically in early childhood (<xref ref-type="bibr" rid="B1">1</xref>). It is associated with impairments in academic, social, and daily functioning (<xref ref-type="bibr" rid="B2">2</xref>&#x2013;<xref ref-type="bibr" rid="B4">4</xref>). Although prevalence estimates vary, global childhood prevalence is generally around 7% (<xref ref-type="bibr" rid="B5">5</xref>), with recent U.S. data reporting 10.5% among children aged 3&#x2013;17 years (<xref ref-type="bibr" rid="B6">6</xref>). Preschool prevalence rates are estimated between 3% and 10.5%, depending on diagnostic methods and cultural contexts (<xref ref-type="bibr" rid="B7">7</xref>&#x2013;<xref ref-type="bibr" rid="B9">9</xref>). Symptom presentation also shifts with development, with hyperactivity/impulsivity predominating in younger children and inattention becoming more prominent with age (<xref ref-type="bibr" rid="B10">10</xref>).</p>
<p>The diagnosis of ADHD is currently based on a comprehensive clinical evaluation involving the child, parents, and teachers, with reference to DSM-5 criteria (<xref ref-type="bibr" rid="B11">11</xref>). Criteria require that symptoms persist for at least six months, are present in more than one context, and cause functional impairment (<xref ref-type="bibr" rid="B1">1</xref>). In preschoolers, inattentive symptoms may be less distinctive developmentally, while hyperactivity, impulsivity, difficulties with rule-following, and problems in social adaptation are particularly salient (<xref ref-type="bibr" rid="B10">10</xref>, <xref ref-type="bibr" rid="B12">12</xref>). To support diagnostic decision-making, standardized scales such as the Conners Early Childhood Parent and Teacher Rating Scales, the ADHD Rating Scale-IV Preschool Version (ADHD-RS-IV-P), and the Early Childhood Inventory-4 (ECI-4) are widely used (<xref ref-type="bibr" rid="B13">13</xref>&#x2013;<xref ref-type="bibr" rid="B15">15</xref>). These tools provide valuable reports from parents and teachers regarding children&#x2019;s behavior across settings.</p>
<p>Despite their clinical utility, there is currently no single biological marker or objective diagnostic test for ADHD. Symptom rating scales and clinical judgment remain central to ADHD assessment; however, current diagnostic pathways rely heavily on clinical interviews and multi-informant rating scales that are vulnerable to recall and expectancy biases (<xref ref-type="bibr" rid="B16">16</xref>). In response, a range of objective measures has been investigated. Chief among these are Continuous Performance Tests (CPTs): computerized paradigms that index sustained attention, response inhibition, and vigilance through target&#x2013;nontarget discrimination and speed&#x2013;accuracy trade-offs; typical outcome metrics include omission and commission errors, reaction time, and reaction time variability (<xref ref-type="bibr" rid="B17">17</xref>). Notably, neurocognitive tests have also been administered in some studies with preschool-aged children (<xref ref-type="bibr" rid="B18">18</xref>, <xref ref-type="bibr" rid="B19">19</xref>).</p>
<p>Neurophysiological approaches have likewise been explored. Electroencephalography (EEG) has been recorded both at rest and during CPT performance, and several studies have applied machine-learning techniques to EEG features to aid ADHD classification (<xref ref-type="bibr" rid="B20">20</xref>, <xref ref-type="bibr" rid="B21">21</xref>). Parallel efforts using neuroimaging&#x2014;such as applying machine learning to features derived from functional MRI&#x2014;have also been reported (<xref ref-type="bibr" rid="B22">22</xref>). More recently, virtual reality (VR) paradigms that simulate everyday tasks and capture ecologically valid behavioral and performance data have emerged as a promising direction (<xref ref-type="bibr" rid="B23">23</xref>). Nevertheless, these technologies, whether behavioral, neurophysiological, or neuroimaging-based, are not yet sufficiently reliable to serve as standalone diagnostic methods (<xref ref-type="bibr" rid="B24">24</xref>).</p>
<p>In addition to these task-based cognitive measures and brain-based methods, real-world behavioral activity has been quantified using both wearable sensors and computer vision methods. Accelerometers provide flexibility across settings but require long monitoring periods and extensive data processing to ensure reliability (<xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B26">26</xref>). Marker-based systems such as Infrared Motion Tracking (IMT) capture movements through reflective markers and infrared cameras (<xref ref-type="bibr" rid="B27">27</xref>). More recently, video-based methods have shown considerable promise. For example, Wehrmann and M&#xfc;ller (2015) estimated activity levels using webcam video compression, while Chiu et&#xa0;al. (2024) applied pixel subtraction and machine learning techniques to clinical recordings to distinguish children with ADHD from controls (<xref ref-type="bibr" rid="B28">28</xref>, <xref ref-type="bibr" rid="B29">29</xref>). Yet, even when these methods operate in &#x201c;naturalistic&#x201d; environments, they often impose nontrivial burdens&#x2014;children may need to wear devices (<xref ref-type="bibr" rid="B23">23</xref>, <xref ref-type="bibr" rid="B25">25</xref>) or interact with conspicuous hardware (<xref ref-type="bibr" rid="B28">28</xref>).</p>
<p>To address these limitations, point-of-view (POV) eyeglasses offer an unobtrusive, ecologically valid means of capturing moment-to-moment behaviors without materially disrupting natural interaction. In psychiatry, POV systems have been applied across a range of conditions and modalities, particularly within treatment interventions. For example, they have been used as supportive tools to facilitate socialization in autism (<xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B31">31</xref>); and as instructional media for teaching play skills to autistic children (<xref ref-type="bibr" rid="B32">32</xref>). Beyond intervention, POV cameras have been shown to measure eye contact during natural social exchanges in a safe and valid manner, while offering a relatively low-cost, scalable approach (<xref ref-type="bibr" rid="B33">33</xref>). More recently, Ahn et&#xa0;al. (2024) leveraged POV-derived video to quantify gaze and smiling behaviors and examined their associations with autism severity, further underscoring the promise of POV methodologies for objective assessment (<xref ref-type="bibr" rid="B34">34</xref>). Extending the use of POV approaches beyond developmental populations, Kay&#x131;&#x15f; et&#xa0;al. (2025) employed multimodal features derived from POV recordings during semi-structured clinical interviews to detect autism and depression in two related studies, highlighting the ecological validity of first-person behavioral sensing in psychiatric research (<xref ref-type="bibr" rid="B35">35</xref>, <xref ref-type="bibr" rid="B36">36</xref>). However, existing POV work has focused primarily on affective behaviors, social attention and intervention; to our knowledge, no studies have quantitatively evaluated preschool hyperactivity in naturalistic interactions using POV systems.</p>
<p>Motivated by the unmet need for cost-effective, developmentally appropriate, and objective behavioral measures for preschool populations, we leverage first-person, point-of-view (POV) recordings. We capture a standardized, teacher-led storytelling interaction and derive quantitative behavioral indices directly from the classroom&#x2014;an ecologically valid, real-life context. To our knowledge, this is the first investigation to objectively characterize preschool hyperactivity using POV eyeglasses and to anchor evaluation in authentic social exchanges while maintaining a scalable, resource-efficient workflow.</p>
<p>We address two questions:</p>
<list list-type="order">
<list-item>
<p>In naturalistic classroom interactions, do movement-based features derived from body regions (e.g., head, trunk, upper limbs) differ between preschoolers at risk for ADHD and non-risk peers&#x2014;i.e., do whole-body mobility metrics extracted from in-class POV video show meaningful group differences?</p></list-item>
<list-item>
<p>Can these differences be detected and reliably classified using machine-learning methods suitable for screening, yielding models that are performant, interpretable where possible?</p></list-item>
</list>
</sec>
<sec id="s2">
<title>Methods</title>
<sec id="s2_1">
<title>Study design</title>
<p>This study employed an observational, cross-sectional design to quantify hyperactivity-related behavioral markers in preschool children during a brief, standardized story-listening interaction. A single trained preschool teacher (female, ~32 years) conducted all sessions while wearing point-of-view (POV) glasses with a front-facing camera (1920&#xd7;1080, 30 fps), enabling unobtrusive, first-person recording of child behavior from a natural classroom perspective. Each child was seen individually in a quiet, vacated classroom within their own school; lighting and seating geometry were held constant, with the teacher and child positioned face-to-face at a fixed distance of 300 cm. The teacher delivered a three-minute story via headphones while reading aloud to preserve natural prosody. Immediately afterward, a six-item, segment-aligned comprehension questionnaire was administered to index attention and recall. POV videos were de-identified and processed offline by an engineer blinded to diagnostic grouping, using a predefined computer-vision pipeline (MediaPipe pose estimation) to extract frame-wise landmarks and derive movement metrics (e.g., cumulative displacement, landmark-based movement variability) as objective proxies of hyperactivity.</p>
</sec>
<sec id="s2_2">
<title>Recruitment</title>
<p>Our study was conducted in a regional preschool. Ethical approval was obtained from the Non-Interventional Research Ethics Committee of Zonguldak B&#xfc;lent Ecevit University (decision no. 2025/07, dated 09/04/2025). Permission to conduct the study at the participating preschool was obtained through the Turkish Ministry of National Education. Written informed consent was obtained from the parents or legal guardians of all participating children. A total of 51 preschoolers participated in the study (29 boys, 22 girls), aged between 48 and 60 months. Participant recruitment and data collection were conducted between May and September 2025. All children were evaluated by an experienced child and adolescent psychiatrist using DSM-5-TR criteria, with additional information obtained through structured teacher interviews to assess classroom behavior.</p>
<p>Exclusion criteria were current medication use, intellectual disability, autism spectrum disorder, and severe psychiatric comorbidities other than ADHD. Accordingly, 2 children were excluded due to cognitive impairment, 3 children due to anxiety disorder, and 1 child due to autism spectrum disorder. Teachers completed the Conners Teacher Rating Scale&#x2013;Revised: Short Form (CTRS-R:S), a widely used instrument for assessing hyperactivity, inattention, and oppositional behaviors in early childhood settings. The scale has demonstrated robust psychometric properties in international preschool samples, including strong internal consistency and factor validity (<xref ref-type="bibr" rid="B37">37</xref>, <xref ref-type="bibr" rid="B38">38</xref>). The Turkish adaptation has likewise shown high internal consistency and a factor structure consistent with the original version, supporting its applicability for evaluating teacher-reported behavioral difficulties in Turkish children (<xref ref-type="bibr" rid="B39">39</xref>). However, although the Turkish version has established linguistic and psychometric adequacy, nationally normed T-scores and culture-specific clinical cut-offs have not yet been developed. In the present study, CTRS-R:S Hyperactivity scores were therefore used as dimensional indices to characterize symptom severity and to examine their associations with movement-based metrics derived from POV video.</p>
<p>The hyperactivity-risk group consisted of preschoolers classified as positive for ADHD-related hyperactivity/impulsivity according to DSM-5-TR criteria, indicating developmentally inappropriate levels of hyperactive and/or impulsive symptoms. Classroom manifestations of hyperactivity were further corroborated through structured teacher reports, including persistent difficulty remaining seated, excessive motor activity, and disruptive movement during group activities. The non-hyperactive comparison group included children for whom evaluation according to DSM-5-TR criteria did not indicate clinically significant hyperactivity, impulsivity, or functional impairment, and whose teachers reported no concerns regarding activity level or behavioral regulation in the classroom. Using these criteria, the final sample comprised 51 preschool children aged 48&#x2013;60 months, including 20 children in the hyperactivity-risk group (14 boys and 6 girls) and 31 children in the non-hyperactive comparison group (16 girls and 15 boys). Children for whom the DSM-5-TR&#x2013;based evaluation and teacher-reported classroom behavior were inconsistent (n = 7) were excluded <italic>a priori</italic> from group assignment to ensure classification based on concordant behavioral evidence.</p>
</sec>
<sec id="s2_3">
<title>Clinical experiment</title>
<p>A single preschool teacher (female, 32 years) conducted all recordings. This design choice was intended to minimize systematic variability related to interaction style and head-mounted POV camera motion, and to reduce the likelihood that between-group differences could be attributed to experimenter-related factors rather than child behavior. Before data collection, she underwent structured training in the use of point-of-view (POV) glasses and completed pilot sessions to ensure protocol fidelity, with emphasis on maintaining a stable head position and centering the child in the field of view. To minimize schedule-related variability, sessions were held during standardized morning activity periods. All assessments took place in one preschool, in the same vacated classroom, under constant environmental conditions (fixed teacher&#x2013;child distance and consistent ambient lighting, with no background noise).</p>
<p>Each child was individually brought into the room and informed that a story would be read aloud. The teacher and child sat face-to-face at a fixed distance of 300 cm. The teacher wore POV glasses and told the story for exactly three minutes while simultaneously listening through headphones to standardize delivery. A storybook was placed in front of the teacher to maintain the naturalness of the interaction (<xref ref-type="fig" rid="f1"><bold>Figure&#xa0;1</bold></xref>). Video was captured at 1920 &#xd7; 1080 resolution and 30 frames per second (<xref ref-type="bibr" rid="B33">33</xref>). The POV recordings provided a continuous first-person view of the child during the interaction. These videos were subsequently processed using the MediaPipe Pose framework to extract body landmarks and quantify movement patterns. <xref ref-type="fig" rid="f2"><bold>Figure&#xa0;2</bold></xref> illustrates a representative POV video frame and the corresponding pose landmark detection used for movement analysis.</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>Experimental setup and configuration of the recording environment. <bold>(a)</bold> Schematic diagram illustrating the spatial geometry of the testing room, with the teacher and child participant seated face-to-face at a distance of 3 m <bold>(b)</bold> Photograph of the first-person point-of-view (POV) wearable camera used for video recording. <bold>(c)</bold> Photograph of the actual experimental setting, demonstrating the nature of the recording setup during the teacher&#x2013;child interaction.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyt-17-1769322-g001.tif">
<alt-text content-type="machine-generated">Panel a: Illustration of a room setup with a teacher and a child participant sitting three meters apart, facing each other, with labels indicating their roles and distance. Panel b: Photograph of black glasses with an integrated point-of-view camera labeled on a wooden surface. Panel c: Photograph showing an adult wearing the camera glasses and holding a picture book while facing a seated child in a sparse room; the child's face is obscured for privacy.</alt-text>
</graphic></fig>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Representative POV camera frame showing the child seated opposite the teacher and the corresponding MediaPipe Pose landmark detection used for automated movement analysis. For privacy reasons, the participant&#x2019;s face is fully masked and facial landmarks are not shown. The figure is provided for illustrative purposes only.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyt-17-1769322-g002.tif">
<alt-text content-type="machine-generated">Person sitting against a wall on a chair, with a black circle obscuring their face and a stick-figure outline superimposed on their body, wearing a dark sweatshirt and light pants.</alt-text>
</graphic></fig>
<p>For the procedure, the teacher read the story entitled <italic>&#x201c;The Journey Game&#x201d;</italic> (<italic>Yolculuk Oyunu</italic>) to each participant for a duration of three minutes. This story was distributed to preschools in the 2024&#x2013;2025 academic year by the Turkish Ministry of National Education (<xref ref-type="bibr" rid="B40">40</xref>). Immediately after the session, the researcher administered a structured questionnaire related to the story, consisting of six comprehension questions corresponding to each 30-second segment (<xref ref-type="supplementary-material" rid="SM1"><bold>Appendix 1</bold></xref>).</p>
<p>The comprehension questions were selected to correspond directly to each 30-second segment of the story, ensuring an age-appropriate cognitive load. The story&#x2019;s official distribution by the Ministry of National Education guaranteed developmental suitability and standardization across participants. This design supports content validity, as all children were exposed to the same stimuli and evaluated with identical questions, targeting attention, recall, and comprehension processes that are particularly relevant for hyperactivity and inattention.</p>
</sec>
<sec id="s2_4">
<title>Feature extraction</title>
<p>Data collected during the standardized interviews were subsequently analyzed using computer software. Recordings were temporally aligned to the onset of the teacher&#x2019;s first spoken word and trimmed to an exact 180-s window (&#x2248;5,400 frames at 30 fps). Clips were reviewed for drop-frames and timing jitter. Tracking quality was defined per frame and per landmark using the model&#x2019;s confidence output; frames with landmark confidence &lt;0.50 were marked invalid for that landmark. Sessions were excluded if global missingness exceeded 20% of frames (n=3). For sessions with 0&#x2013;20% missing data, we applied a two-tiered procedure: (i) short gaps &#x2264;10 consecutive frames (&#x2264;333 ms) were filled by linear interpolation in 3D for the affected landmark; (ii) longer gaps were smoothed with a constant-velocity Kalman filter (<xref ref-type="bibr" rid="B41">41</xref>) initialized on the nearest valid samples. In addition, landmark trajectories were temporally smoothed using the One Euro Filter, an adaptive low-pass filter that reduces high-frequency jitter while preserving rapid, meaningful movements (<xref ref-type="bibr" rid="B42">42</xref>).</p>
<p>Video recordings were processed with MediaPipe, an open-source real-time computer vision framework developed by Google for human pose estimation (<xref ref-type="bibr" rid="B43">43</xref>). Prior research has shown that MediaPipe provides reliable algorithms for motion tracking and joint angle estimation with acceptable error margins compared to gold standard marker-based systems, making it a cost-effective tool for clinical and developmental applications (<xref ref-type="bibr" rid="B44">44</xref>&#x2013;<xref ref-type="bibr" rid="B46">46</xref>). Importantly, the framework has also been applied successfully in studies involving infants (<xref ref-type="bibr" rid="B47">47</xref>) and children across diverse age groups (<xref ref-type="bibr" rid="B48">48</xref>). For each frame, the MediaPipe Pose Landmarker returned 3D coordinates (x, y, z) for 33 anatomical keypoints (<xref ref-type="fig" rid="f3"><bold>Figure&#xa0;3</bold></xref>).</p>
<fig id="f3" position="float">
<label>Figure&#xa0;3</label>
<caption>
<p>MediaPipe Pose Landmarks used in the present study (33 keypoints) (<xref ref-type="bibr" rid="B43">43</xref>). The figure illustrates the 33 anatomical landmarks detected by the MediaPipe Pose Landmarker model, including facial keypoints (0&#x2013;10), upper-body landmarks (11&#x2013;24), and lower-body landmarks (25&#x2013;32). These landmarks were extracted frame-by-frame from POV recordings and used to compute movement-based features (e.g., cumulative displacement, segment-wise variability) as objective indicators of child motor activity during the storytelling task.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyt-17-1769322-g003.tif">
<alt-text content-type="machine-generated">Diagram of a simplified human skeleton model using numbered nodes connected by black lines, with nodes highlighted as red dots representing key body, face, and limb points for pose estimation.</alt-text>
</graphic></fig>
<p>Although the MediaPipe Pose Landmarker outputs 3D coordinates (x, y, z), the depth (z-axis) estimates derived from monocular RGB video are known to be relative and less reliable in absolute terms, particularly in single-camera conditions (<xref ref-type="bibr" rid="B49">49</xref>). Accordingly, movement quantification in the present study focused primarily on displacements within the image plane (x&#x2013;y), which provide the most robust and interpretable estimates of gross motor activity. Z-axis displacements were therefore not included in the primary movement indices and were considered only as approximate indicators of forward&#x2013;backward motion relative to the camera.</p>
<p>After extraction, landmarks were aggregated into anatomically meaningful composite points to reduce redundancy and improve tracking robustness. Specifically, the head was represented by the centroid of 11 facial landmarks (nose, eyes, ears, and mouth), which move largely as a rigid unit during gross head displacement. Similarly, hand proxies were computed as the centroid of three distal hand landmarks (thumb, index, and pinky) for each side, and foot proxies were computed as the centroid of the heel and foot-index landmarks for each side. Subsequent analyses were performed on the resulting set of composite and joint landmarks: head (11-landmark centroid), left/right shoulder, left/right elbow, left/right wrist, left/right hip, left/right knee, left/right ankle, left/right hand (3-landmark centroid), and left/right foot (2-landmark centroid). Movement was quantified by computing Euclidean displacement over short temporal windows. Specifically, landmark positions were averaged within consecutive non-overlapping 5-frame windows (&#x2248;167 ms at 30 fps), and displacement was calculated between successive window means. These displacements were then summed across the 3-minute recording to derive cumulative movement indices reflecting overall motor activity during the storytelling task. This analytic strategy reduces the influence of frame-level jitter inherent in monocular pose estimation and captures behaviorally meaningful motor fluctuations. The approach was conceptually inspired by displacement-based methods used in prior work to quantify naturalistic movement dynamics from automatic pose estimation (<xref ref-type="bibr" rid="B50">50</xref>).</p>
<p>In accordance with prior work, movement quantification was performed using a pelvis-root reference coordinate frame, with the root joint positioned at the center of the pelvis (<xref ref-type="bibr" rid="B51">51</xref>, <xref ref-type="bibr" rid="B52">52</xref>). Person-centric (root-centric) representations express all 3D keypoint coordinates relative to this central root, effectively re-anchoring the body so that joint positions are computed with respect to the pelvis. This approach is particularly advantageous in single-person, monocular pose estimation, as it reduces scale variation and stabilizes body-motion trajectories (<xref ref-type="bibr" rid="B53">53</xref>). By using a pelvis-centered reference frame, movement features primarily captured relative limb and segmental motion rather than absolute whole-body translation in the scene.</p>
<p>For each participant, the following parameters were extracted:</p>
<list list-type="bullet">
<list-item>
<p>Regional activity indices were computed as cumulative displacement measures for each anatomically defined body region, including the head (11-landmark centroid), left and right shoulders, elbows, wrists, hips, knees, ankles, distal hands, and distal feet.</p></list-item>
<list-item>
<p>The global activity index was calculated as the sum of all regional activity indices, providing an integrated measure of overall motor activity across the entire body during the 3-minute storytelling session.</p></list-item>
</list>
<p>These quantitative features were then aggregated for subsequent statistical analysis and machine learning models aimed at differentiating children with elevated hyperactivity risk from their non-hyperactive peers.</p>
</sec>
<sec id="s2_5">
<title>Statistical analysis</title>
<p>Statistical analyses were conducted using IBM SPSS Statistics (Version 27). Group differences in demographic variables were examined using independent-samples <italic>t</italic> tests for age and chi-square tests for sex. For movement-based features, normality was assessed using the Shapiro&#x2013;Wilk test. Depending on distributional characteristics, between-group comparisons were performed using independent-samples <italic>t</italic> tests or Mann&#x2013;Whitney <italic>U</italic> tests. We calculated effect sizes for all univariate group comparisons. For Mann&#x2013;Whitney <italic>U</italic> tests, effect size was expressed as <italic>r</italic> and derived from the standardized test statistic and the total sample size. Associations between regional activity indices, the global activity index, and teacher-rated hyperactivity scores were examined using Spearman correlation coefficients. Statistical significance was set at p &lt; 0.05 (two-tailed).</p>
<p>Because multiple, conceptually related motor activity outcomes were examined, we adopted a two-tiered approach to control for multiple comparisons.</p>
<p>As the primary correction strategy, false discovery rate (FDR) control was applied across all univariate movement features included in the main univariate analyses using the Benjamini&#x2013;Hochberg procedure (q = 0.05).</p>
<p>In addition, for anatomically structured sensitivity analyses, movement features were grouped <italic>a priori</italic> into five theoretically and anatomically coherent domains, and family-wise error rate control was performed at the domain level using Bonferroni correction. The five domains were defined as follows:</p>
<list list-type="roman-lower">
<list-item>
<p>Head domain, represented by a composite head movement index derived from facial landmarks (mhead = 1);</p></list-item>
<list-item>
<p>Right upper limb domain, including right shoulder, right elbow, right wrist, and right hand movement indices (mRUL = 4);</p></list-item>
<list-item>
<p>Left upper limb domain, including left shoulder, left elbow, left wrist, and left hand movement indices (mLUL = 4);</p></list-item>
<list-item>
<p>Right lower limb domain, including right hip, right knee, right ankle, and right foot movement indices (mRLL = 4);</p></list-item>
<list-item>
<p>Left lower limb domain, including left hip, left knee, left ankle, and left foot movement indices (mLLL = 4).</p></list-item>
</list>
<p>Within each domain, Bonferroni correction was applied to adjust for multiple comparisons, with the domain-specific significance threshold defined as &#x3b1; = 0.05 divided by the number of variables within that domain. Accordingly, the adjusted significance level was set at &#x3b1; = 0.0125 for each upper- and lower-limb domain. No correction was applied to the head domain, which comprised a single outcome measure. The global activity index was treated as the primary outcome and was therefore evaluated without correction.</p>
</sec>
<sec id="s2_6">
<title>Machine learning analysis</title>
<p>In addition to group-level statistical comparisons, machine learning approaches were employed to evaluate the discriminative performance of movement-based behavioral features in distinguishing preschool children at risk for ADHD-related hyperactivity from non-hyperactive peers. The feature set consisted of region-specific movement indices derived from pose-based analysis, including the head, bilateral shoulders, elbows, wrists, distal hands, knees, ankles, and distal feet. Hip landmarks were not included as independent features, as movement was quantified relative to a pelvis-based reference frame. The global activity index was also excluded from machine learning analyses to avoid redundancy and potential information leakage, given that it represents a composite of regional movement measures.</p>
<p>Classification performance was quantified using a set of complementary metrics, including accuracy, precision, recall (sensitivity), specificity, F1-score, and the area under the receiver operating characteristic curve (ROC&#x2013;AUC), enabling a robust and multifaceted evaluation of model performance.</p>
<p>Given the tabular structure of the extracted features and the modest sample size, several supervised learning algorithms with complementary strengths were selected. Tree-based ensemble methods&#x2014;including Random Forests (<xref ref-type="bibr" rid="B54">54</xref>), Extremely Randomized Trees (<xref ref-type="bibr" rid="B55">55</xref>), and AdaBoost with decision trees as base estimators (<xref ref-type="bibr" rid="B56">56</xref>)&#x2014;were chosen due to their robustness to feature scaling, ability to model nonlinear relationships, and suitability for small to medium-sized datasets. In addition, Support Vector Machines (SVM) and k-Nearest Neighbors (KNN) classifiers were evaluated, as these distance- and geometry-based methods are well suited for continuous movement features defined within a common coordinate space. For each model, hyperparameters were optimized independently to maximize classification performance.</p>
<p>To mitigate overfitting and obtain an unbiased estimate of model generalization performance, a nested cross-validation scheme was employed for all machine learning models. In this framework, an outer cross-validation loop was used to evaluate model performance, while an inner cross-validation loop within each training fold was applied for hyperparameter optimization. Hyperparameters were selected based on validation performance in the inner loop and subsequently evaluated on held-out test data in the outer loop. Final performance metrics were obtained by averaging results across the outer cross-validation folds. This nested procedure reduces the risk of optimistic bias that can arise when hyperparameters are tuned and evaluated on the same data and is particularly recommended for studies with limited sample sizes.</p>
<p>In the nested cross-validation framework, both the inner (hyperparameter optimization) and outer (performance evaluation) loops employed a stratified 5-fold cross-validation scheme. In each iteration, models were trained on four folds and evaluated on the remaining fold, ensuring that all observations contributed to both training and validation across folds. Performance metrics were averaged across the outer folds to obtain a robust estimate of generalization performance. The use of 5-fold cross-validation provides a favorable trade-off between bias and variance, allowing efficient use of available data while maintaining computational efficiency and stability compared to leave-one-out cross-validation or a single train&#x2013;test split.</p>
<p>Another rationale for selecting tree-based ensemble methods was their inherent robustness to overfitting. By aggregating predictions from multiple weakly correlated decision trees, these models reduce variance and improve generalization performance, particularly in small to moderate-sized datasets.</p>
<p>In the implementation of the Support Vector Machine (SVM) and k-Nearest Neighbors (KNN) classifiers, class imbalance was explicitly addressed through a weighting strategy. Specifically, class weights were assigned inversely proportional to the number of samples in each class, such that minority classes received higher weights and majority classes received lower weights. This approach ensured that misclassification of underrepresented classes incurred a greater penalty during model training, thereby mitigating bias toward majority classes and improving the models&#x2019; ability to learn from imbalanced data distributions.</p>
</sec>
</sec>
<sec id="s3" sec-type="results">
<title>Results</title>
<sec id="s3_1">
<title>Participant characteristics</title>
<p>The final sample consisted of 51 preschool children aged 48&#x2013;60 months, including 20 children in the hyperactivity-risk group and 31 children in the non-hyperactive comparison group. The groups did not differ in age or sex distribution (<xref ref-type="table" rid="T1"><bold>Table&#xa0;1</bold></xref>).</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>Demographic characteristics of the study sample.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Variable</th>
<th valign="middle" align="center">Control (n = 31)</th>
<th valign="middle" align="center">Hyperactivity-risk (n = 20)</th>
<th valign="middle" align="center">Test</th>
<th valign="middle" align="center">P</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Age (months), M (SD)</td>
<td valign="middle" align="left">52.58 (3.40)</td>
<td valign="middle" align="left">52.75 (3.95)</td>
<td valign="middle" align="left">t(49) = &#x2212;0.16</td>
<td valign="middle" align="left">0.87</td>
</tr>
<tr>
<td valign="middle" align="left">Sex (boys/girls)</td>
<td valign="middle" align="left">15/16</td>
<td valign="middle" align="left">14/6</td>
<td valign="middle" align="left">&#x3c7;&#xb2;(1) = 2.32</td>
<td valign="middle" align="left">0.13</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3_2">
<title>Task-related performance</title>
<p>The two groups did not differ significantly in their performance on the story-related comprehension questions. The control group achieved a mean of 2.55 correct responses (SD = 1.36), while the hyperactivity-risk group achieved a mean of 2.20 (SD = 1.32). This difference was not statistically significant (p = 0.37).</p>
</sec>
<sec id="s3_3">
<title>Conners hyperactivity scores and their associations with movement indices</title>
<p>Children in the non-hyperactive comparison group showed low Conners hyperactivity scores (mean = 1.52, SD = 2.86; median = 0), whereas higher scores were observed in the hyperactivity-risk group (mean = 10.80, SD = 6.58; median = 11). Given non-normality in the non-hyperactive comparison group, we used nonparametric analysis. A Mann&#x2013;Whitney U test indicated a significant group difference, with higher hyperactivity scores observed in the hyperactivity-risk group (U = 65.50, p &lt; 0.001).</p>
<p>We conducted Spearman correlation analyses to examine associations between regional movement indices and teacher-rated hyperactivity scores. Left ankle movement showed a positive correlation with hyperactivity scores (&#x3c1; = 0.32, 95% CI [0.05, 0.56], p = 0.022). Similar patterns were observed for right ankle movement (&#x3c1; = 0.28, 95% CI [0.03, 0.51], p = 0.046) and left distal hand movement (&#x3c1; = 0.28, 95% CI [0.04, 0.51], p = 0.049). No other regional movement indices showed statistically significant correlations with hyperactivity ratings (<xref ref-type="table" rid="T2"><bold>Table&#xa0;2</bold></xref>). Given the exploratory nature of these analyses and the modest sample size, correlation results were not corrected for multiple comparisons. Accordingly, findings are interpreted cautiously, with greater emphasis on the direction of effects rather than on strict statistical significance.</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Spearman correlations between regional movement indices and teacher-rated hyperactivity scores.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Movement region</th>
<th valign="middle" align="center">Spearman&#x2019;s &#x3c1;</th>
<th valign="middle" align="center">P value</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Head</td>
<td valign="middle" align="left">0.22</td>
<td valign="middle" align="left">0.117</td>
</tr>
<tr>
<td valign="middle" align="left">Left shoulder</td>
<td valign="middle" align="left">0.11</td>
<td valign="middle" align="left">0.431</td>
</tr>
<tr>
<td valign="middle" align="left">Right shoulder</td>
<td valign="middle" align="left">0.13</td>
<td valign="middle" align="left">0.359</td>
</tr>
<tr>
<td valign="middle" align="left">Left elbow</td>
<td valign="middle" align="left">0.20</td>
<td valign="middle" align="left">0.154</td>
</tr>
<tr>
<td valign="middle" align="left">Right elbow</td>
<td valign="middle" align="left">0.18</td>
<td valign="middle" align="left">0.200</td>
</tr>
<tr>
<td valign="middle" align="left">Left wrist</td>
<td valign="middle" align="left">0.26</td>
<td valign="middle" align="left">0.066</td>
</tr>
<tr>
<td valign="middle" align="left">Right wrist</td>
<td valign="middle" align="left">0.18</td>
<td valign="middle" align="left">0.198</td>
</tr>
<tr>
<td valign="middle" align="left">Left knee</td>
<td valign="middle" align="left">0.26</td>
<td valign="middle" align="left">0.063</td>
</tr>
<tr>
<td valign="middle" align="left">Right knee</td>
<td valign="middle" align="left">0.18</td>
<td valign="middle" align="left">0.218</td>
</tr>
<tr>
<td valign="middle" align="left"><bold>Left ankle</bold></td>
<td valign="middle" align="left"><bold>0.32</bold></td>
<td valign="middle" align="left"><bold>0.022</bold>*</td>
</tr>
<tr>
<td valign="middle" align="left"><bold>Right ankle</bold></td>
<td valign="middle" align="left"><bold>0.28</bold></td>
<td valign="middle" align="left"><bold>0.046</bold>*</td>
</tr>
<tr>
<td valign="middle" align="left"><bold>Left distal hand</bold></td>
<td valign="middle" align="left"><bold>0.28</bold></td>
<td valign="middle" align="left"><bold>0.049</bold>*</td>
</tr>
<tr>
<td valign="middle" align="left">Right distal hand</td>
<td valign="middle" align="left">0.19</td>
<td valign="middle" align="left">0.175</td>
</tr>
<tr>
<td valign="middle" align="left">Left distal foot</td>
<td valign="middle" align="left">0.26</td>
<td valign="middle" align="left">0.066</td>
</tr>
<tr>
<td valign="middle" align="left">Right distal foot</td>
<td valign="middle" align="left">0.23</td>
<td valign="middle" align="left">0.111</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>Bold values indicate statistically significant results.</p></fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s3_4">
<title>Group differences in movement indices</title>
<p>We assessed the normality of regional and global movement indices separately for each group. Across the 17 regional indices and the global activity index, most variables showed significant deviations from normality in at least one group (p &lt; 0.05). Given violations of normality assumptions, we applied nonparametric statistical methods.</p>
<p>We observed significant group differences across multiple movement indices, with consistently higher activity levels in the hyperactivity-risk group. Differences were most pronounced in distal upper- and lower-limb regions, whereas proximal regions showed weaker or non-significant effects. Head movement also differed significantly between groups. This overall pattern was preserved after false discovery rate (FDR) correction (<xref ref-type="table" rid="T3"><bold>Table&#xa0;3</bold></xref>).</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Group differences in regional movement indices between the hyperactivity-risk group and the non-hyperactive comparison group.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="center">Body region/Feature</th>
<th valign="middle" align="center">Test statistic (U)</th>
<th valign="middle" align="center">r value</th>
<th valign="middle" align="center">Raw p-value</th>
<th valign="middle" align="center">FDR-adjusted p-value</th>
<th valign="middle" align="center">Direction</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Head</td>
<td valign="middle" align="left">182.00</td>
<td valign="middle" align="left">0.346</td>
<td valign="middle" align="left">0.014</td>
<td valign="middle" align="left">0.021</td>
<td valign="middle" align="left">Hyperactivity-risk &gt; Control</td>
</tr>
<tr>
<td valign="middle" align="left">Right shoulder</td>
<td valign="middle" align="left">224.00</td>
<td valign="middle" align="left">0.233</td>
<td valign="middle" align="left">0.097</td>
<td valign="middle" align="left">0.104</td>
<td valign="middle" align="left">n.s.</td>
</tr>
<tr>
<td valign="middle" align="left">Left shoulder</td>
<td valign="middle" align="left">226.00</td>
<td valign="middle" align="left">0.227</td>
<td valign="middle" align="left">0.105</td>
<td valign="middle" align="left">0.105</td>
<td valign="middle" align="left">n.s.</td>
</tr>
<tr>
<td valign="middle" align="left">Right elbow</td>
<td valign="middle" align="left">165.00</td>
<td valign="middle" align="left">0.392</td>
<td valign="middle" align="left">0.005</td>
<td valign="middle" align="left">0.011</td>
<td valign="middle" align="left">Hyperactivity-risk &gt; Control</td>
</tr>
<tr>
<td valign="middle" align="left">Left elbow</td>
<td valign="middle" align="left">162.00</td>
<td valign="middle" align="left">0.400</td>
<td valign="middle" align="left">0.004</td>
<td valign="middle" align="left">0.012</td>
<td valign="middle" align="left">Hyperactivity-risk &gt; Control</td>
</tr>
<tr>
<td valign="middle" align="left">Right wrist</td>
<td valign="middle" align="left">183.00</td>
<td valign="middle" align="left">0.345</td>
<td valign="middle" align="left">0.014</td>
<td valign="middle" align="left">0.020</td>
<td valign="middle" align="left">Hyperactivity-risk &gt; Control</td>
</tr>
<tr>
<td valign="middle" align="left">Left wrist</td>
<td valign="middle" align="left">163.00</td>
<td valign="middle" align="left">0.398</td>
<td valign="middle" align="left">0.005</td>
<td valign="middle" align="left">0.010</td>
<td valign="middle" align="left">Hyperactivity-risk &gt; Control</td>
</tr>
<tr>
<td valign="middle" align="left">Right distal hand</td>
<td valign="middle" align="left">186.00</td>
<td valign="middle" align="left">0.335</td>
<td valign="middle" align="left">0.017</td>
<td valign="middle" align="left">0.021</td>
<td valign="middle" align="left">Hyperactivity-risk &gt; Control</td>
</tr>
<tr>
<td valign="middle" align="left">Left distal hand</td>
<td valign="middle" align="left">160.00</td>
<td valign="middle" align="left">0.405</td>
<td valign="middle" align="left">0.004</td>
<td valign="middle" align="left">0.010</td>
<td valign="middle" align="left">Hyperactivity-risk &gt; Control</td>
</tr>
<tr>
<td valign="middle" align="left">Right knee</td>
<td valign="middle" align="left">197.00</td>
<td valign="middle" align="left">0.305</td>
<td valign="middle" align="left">0.029</td>
<td valign="middle" align="left">0.033</td>
<td valign="middle" align="left">Hyperactivity-risk &gt; Control</td>
</tr>
<tr>
<td valign="middle" align="left">Left knee</td>
<td valign="middle" align="left">184.00</td>
<td valign="middle" align="left">0.340</td>
<td valign="middle" align="left">0.015</td>
<td valign="middle" align="left">0.021</td>
<td valign="middle" align="left">Hyperactivity-risk &gt; Control</td>
</tr>
<tr>
<td valign="middle" align="left">Right ankle</td>
<td valign="middle" align="left">118.00</td>
<td valign="middle" align="left">0.518</td>
<td valign="middle" align="left">&lt;0.001</td>
<td valign="middle" align="left">0.001</td>
<td valign="middle" align="left">Hyperactivity-risk &gt; Control</td>
</tr>
<tr>
<td valign="middle" align="left">Left ankle</td>
<td valign="middle" align="left">124.00</td>
<td valign="middle" align="left">0.503</td>
<td valign="middle" align="left">&lt;0.001</td>
<td valign="middle" align="left">0.001</td>
<td valign="middle" align="left">Hyperactivity-risk &gt; Control</td>
</tr>
<tr>
<td valign="middle" align="left">Right distal foot</td>
<td valign="middle" align="left">143.00</td>
<td valign="middle" align="left">0.451</td>
<td valign="middle" align="left">0.001</td>
<td valign="middle" align="left">0.005</td>
<td valign="middle" align="left">Hyperactivity-risk &gt; Control</td>
</tr>
<tr>
<td valign="middle" align="left">Left distal foot</td>
<td valign="middle" align="left">146.00</td>
<td valign="middle" align="left">0.442</td>
<td valign="middle" align="left">0.002</td>
<td valign="middle" align="left">0.007</td>
<td valign="middle" align="left">Hyperactivity-risk &gt; Control</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>Differences were assessed using Mann&#x2013;Whitney U tests. Effect sizes are reported as r. False discovery rate (FDR)&#x2013;adjusted p-values are shown. Positive effects indicate higher movement levels in the hyperactivity-risk group.</p></fn>
</table-wrap-foot>
</table-wrap>
<p>Domain-based Bonferroni-corrected analyses revealed significant group differences in multiple distal movement indices, with higher activity levels in the hyperactivity-risk group. In the upper limbs, significant differences were observed for right elbow (p = 0.005), left elbow (p = 0.004), left wrist (p = 0.005), and left distal hand movements (p = 0.004). Shoulder movements, right wrist and right distal hand did not reach statistical significance. In the lower limbs, significant group differences were found for right ankle (p &lt; 0.001), left ankle (p &lt; 0.001), right distal foot (p = 0.001) and left distal foot (p = 0.002). Knee movements (right, p = 0.029; left, p = 0.015) did not remain significant at the domain-corrected threshold (&#x3b1; = 0.0125).</p>
<p>We observed a significant group difference in the global activity index, reflecting overall motor activity across all body regions during the 3-minute storytelling task. Preschoolers in the hyperactivity-risk group exhibited higher global motor activity than the non-hyperactive comparison group (Mann&#x2013;Whitney U = 155.00, p = 0.003), with higher mean ranks in the hyperactivity-risk group (33.75) compared to controls (21.00).</p>
</sec>
<sec id="s3_5">
<title>Inter-regional correlations of movement indices</title>
<p>To further examine whether apparent right&#x2013;left asymmetries in feature importance could be explained by shared movement variance across body regions, we computed inter-regional correlations among all movement indices. As shown in <xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4</bold></xref>, strong bilateral correlations were observed across homologous body segments.</p>
<fig id="f4" position="float">
<label>Figure&#xa0;4</label>
<caption>
<p>Inter-regional movement correlation heatmap. Spearman correlation coefficients illustrating pairwise associations between regional movement indices across all participants.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyt-17-1769322-g004.tif">
<alt-text content-type="machine-generated">Heatmap showing correlation values among different body parts labeled on both axes, with cell colors representing correlation strength from 0.41 to 1.0. A vertical color bar indicates higher values as darker shades.</alt-text>
</graphic></fig>
</sec>
<sec id="s3_6">
<title>Machine learning classification results</title>
<p>Following statistical analyses, machine learning models were applied to further evaluate the discriminative ability of movement-based behavioral features in distinguishing preschool children at risk for ADHD-related hyperactivity from non-hyperactive peers. Multiple supervised classifiers were tested, including tree-based ensemble methods, k-nearest neighbors, and support vector machines (SVM). Among the evaluated models, the SVM demonstrated the highest and most consistent performance across evaluation metrics and was therefore selected as the primary model for reporting classification results.</p>
<p>Feature importance estimates were computed and are reported in <xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5</bold></xref>. These computations were performed within the outer loop of the nested cross-validation procedure. Following completion of the inner cross-validation loop, feature importance was assessed for the best-performing estimator, defined as the model with the optimal set of hyperparameters.</p>
<fig id="f5" position="float">
<label>Figure&#xa0;5</label>
<caption>
<p>Permutation feature importance scores for movement-based features used in the support vector machine classifier. Higher values indicate features that contributed more strongly to distinguishing children in the hyperactivity-risk group from the non-hyperactive comparison group.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyt-17-1769322-g005.tif">
<alt-text content-type="machine-generated">Horizontal bar chart illustrating permutation importance for various body parts, with right ankle having the highest importance, followed by right distal hand and left ankle, and left elbow showing the lowest importance.</alt-text>
</graphic></fig>
<p>Feature relevance was evaluated using permutation feature importance (PFI) (<xref ref-type="bibr" rid="B54">54</xref>). In this approach, the values of a single feature are randomly permuted across samples while all other features are kept unchanged, thereby disrupting the relationship between that feature and the outcome while preserving its marginal distribution. The trained model is then re-evaluated on the validation set. A substantial decrease in model performance following permutation indicates that the feature contributes meaningfully to prediction accuracy, whereas minimal change suggests limited predictive relevance.</p>
<p>For each feature, an importance score was calculated as the difference between the baseline model performance and the performance obtained after permutation. To enhance robustness, the permutation procedure was repeated multiple times and importance scores were averaged across repetitions. Larger positive values indicate greater feature importance, values close to zero reflect negligible contribution, and negative values suggest that a feature may introduce noise or adversely affect model performance. Taken together, movement features showing significant group differences and associations with teacher-rated hyperactivity severity (<xref ref-type="table" rid="T2"><bold>Table&#xa0;2</bold></xref>) largely overlapped with those contributing most strongly to the machine learning classifier, as reflected by permutation-based feature importance scores (<xref ref-type="fig" rid="f5"><bold>Figure&#xa0;5</bold></xref>).</p>
<p>To further examine the discriminative capacity of the extracted movement-based features, machine learning models were evaluated using a separate 5-fold cross-validation procedure. Performance metrics reflected classification accuracy and are reported with Wilson 95% confidence intervals. Among the tested classifiers, the support vector machine (SVM) achieved the highest performance. The performance metrics of tested classifiers are presented in <xref ref-type="table" rid="T4"><bold>Table&#xa0;4</bold></xref>. Each performance metric is accompanied by Wilson 95% confidence intervals.</p>
<table-wrap id="T4" position="float">
<label>Table&#xa0;4</label>
<caption>
<p>Performance of machine learning models for distinguishing the hyperactivity-risk group from the non-hyperactive comparison group.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="middle" align="left">Model</th>
<th valign="middle" align="left">Accuracy</th>
<th valign="middle" align="left">Precision</th>
<th valign="middle" align="left">Sensitivity</th>
<th valign="middle" align="left">Specificity</th>
<th valign="middle" align="left">F1 score</th>
<th valign="middle" align="left">Mean CV-fold AUC (&#xb1; SD)</th>
<th valign="middle" align="left">Pooled out-of-fold AUC</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="middle" align="left">Random Forest</td>
<td valign="middle" align="left">70.59% (57.0&#x2013;81.3)</td>
<td valign="middle" align="left">60.87% (47.2&#x2013;73.0)</td>
<td valign="middle" align="left">70% (56.4&#x2013;80.8)</td>
<td valign="middle" align="left">70.97% (57.4&#x2013;81.6)</td>
<td valign="middle" align="left">65.12% (51.4&#x2013;76.7)</td>
<td valign="middle" align="left">0.86 &#xb1; 0.07</td>
<td valign="middle" align="left">0.76 (0.62&#x2013;0.90)</td>
</tr>
<tr>
<td valign="middle" align="left">Extra Trees</td>
<td valign="middle" align="left">64.71% (51.0&#x2013;76.4)</td>
<td valign="middle" align="left">54.55% (41.0&#x2013;67.4)</td>
<td valign="middle" align="left">60% (46.3&#x2013;72.3)</td>
<td valign="middle" align="left">67.74% (54.1&#x2013;78.9)</td>
<td valign="middle" align="left">57.14% (43.5&#x2013;69.7)</td>
<td valign="middle" align="left">0.84 &#xb1; 0.03</td>
<td valign="middle" align="left">0.76 (0.62&#x2013;0.90)</td>
</tr>
<tr>
<td valign="middle" align="left">AdaBoost</td>
<td valign="middle" align="left">66.67% (53.0&#x2013;78.0)</td>
<td valign="middle" align="left">57.14% (43.5&#x2013;69.7)</td>
<td valign="middle" align="left">60% (46.3&#x2013;72.3)</td>
<td valign="middle" align="left">70.97% (57.4&#x2013;81.6)</td>
<td valign="middle" align="left">58.54% (44.9&#x2013;71.0)</td>
<td valign="middle" align="left">0.77 &#xb1; 0.12</td>
<td valign="middle" align="left">0.71 (0.56&#x2013;0.86)</td>
</tr>
<tr>
<td valign="middle" align="left">SVM</td>
<td valign="middle" align="left"><bold>84.31% (72.0&#x2013;91.8)</bold></td>
<td valign="middle" align="left"><bold>80% (67.1&#x2013;88.7)</bold></td>
<td valign="middle" align="left"><bold>80% (67.1&#x2013;88.7)</bold></td>
<td valign="middle" align="left"><bold>87.10% (75.3&#x2013;93.7)</bold></td>
<td valign="middle" align="left"><bold>80% (67.1&#x2013;88.7)</bold></td>
<td valign="middle" align="left"><bold>0.91 &#xb1; 0.06</bold></td>
<td valign="middle" align="left"><bold>0.83 (0.71&#x2013;0.95)</bold></td>
</tr>
<tr>
<td valign="middle" align="left">KNN</td>
<td valign="middle" align="left">74.51% (61.1&#x2013;84.5)</td>
<td valign="middle" align="left">64% (50.3&#x2013;75.8)</td>
<td valign="middle" align="left">80% (67.1&#x2013;88.7)</td>
<td valign="middle" align="left">70.97% (57.4&#x2013;81.6)</td>
<td valign="middle" align="left">71.11% (57.5&#x2013;81.7)</td>
<td valign="middle" align="left">0.79 &#xb1; 0.05</td>
<td valign="middle" align="left">0.77 (0.63&#x2013;0.91)</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>Performance metrics are reported with 95% confidence intervals. Mean cross-validation fold AUC values are presented together with their standard deviations to reflect variability across folds in the nested cross-validation procedure. Pooled out-of-fold AUC values summarize overall discriminative performance across all cross-validation iterations.</p>
<p>Bold values represent the best-performing machine learning model.</p></fn>
</table-wrap-foot>
</table-wrap>
<p>The area under the curve was calculated both as the mean AUC for each cross-validation fold and as the pooled out-of-fold AUC. Since AUC is a nonlinear rank-based statistic, the mean of fold-wise AUCs differs from the AUC computed from pooled out-of-fold predictions. Accordingly, they are reported separately: the mean CV-fold AUC values are presented in mean &#xb1; standard deviation format, while the pooled out-of-fold AUC values are reported along with the corresponding 95% confidence intervals using the Hanley and McNeil (1982) normal approximation based on pooled outer-fold predictions (N<sub>1</sub> = 20 ADHD; N<sub>2</sub> = 31 non-ADHD) (<xref ref-type="bibr" rid="B57">57</xref>). Since multiple variations of SVM and KNN were tested, the best-performing variation is represented here. See <xref ref-type="fig" rid="f6"><bold>Figure&#xa0;6</bold></xref> for ROC curves for all tested classifiers, including each variation of SVM and KNN.</p>
<fig id="f6" position="float">
<label>Figure&#xa0;6</label>
<caption>
<p>Receiver operating characteristic (ROC) curves illustrating the classification performance of the tested machine learning models in distinguishing the hyperactivity-risk group from the non-hyperactive comparison group. Curves are shown for Random Forest (RF), Extra Trees (XTR), AdaBoost (ADA), support vector machine (SVM), and k-nearest neighbors (KNN) classifiers. The diagonal dashed line represents chance-level performance (AUC = 0.50). Reported AUC values and 95% confidence intervals are based on pooled out-of-fold predictions across cross-validation folds.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fpsyt-17-1769322-g006.tif">
<alt-text content-type="machine-generated">Receiver operating characteristic curve comparing multiple machine learning models, including Random Forest, Extra Trees, AdaBoost, SVC with varying C values, and KNN with N equals three and seven. The SVC model with C equals one hundred achieves the highest AUC of zero point eight three, while all models outperform the dashed chance line at AUC equals zero point five.</alt-text>
</graphic></fig>
<p>We trained an auxiliary model including only age and sex and evaluated its performance using the same cross-validation framework as the primary models to assess the potential influence of basic demographic variables. This demographic-only model, which again employed a support vector machine as the classifier, showed limited discriminative ability, with an accuracy of 58.82% (95% CI: 45.2&#x2013;71.2), a mean CV-fold AUC of 0.59 &#xb1; 0.11, and a pooled out-of-fold AUC of 0.42 (95% CI: 0.26&#x2013;0.58). These results show substantially lower performance compared to the primary models detailed previously. Together, these findings are consistent with the univariate analyses and indicate that age and sex contribute minimal predictive value in this sample (51 participants), suggesting that the observed model performance is not driven by basic demographic differences.</p>
</sec>
</sec>
<sec id="s4" sec-type="discussion">
<title>Discussion</title>
<sec id="s4_1">
<title>Principal findings</title>
<p>This study suggests that movement-based behavioral features extracted from point-of-view (POV) video recordings during a standardized, naturalistic classroom interaction may help differentiate preschool children at risk for hyperactivity from their non-hyperactive peers. To our knowledge, this is the first study to employ teacher-worn POV video recordings combined with automated pose estimation to objectively quantify hyperactivity-related motor behavior in preschool-aged children within an ecologically valid educational context. Despite the two groups being comparable in age and sex, significant differences emerged across multiple body regions, including head, upper limb, and lower limb movement indices, as well as in overall global motor activity. Together, these findings suggest that POV-based, vision-driven assessment approaches hold promise as scalable and low-burden tools for supporting early screening of hyperactivity in preschool settings.</p>
<p>In addition to the observed group-level differences, machine learning analyses yielded complementary evidence suggesting that movement-based behavioral features may carry discriminative information. Among the evaluated classifiers, the support vector machine (SVM) achieved the strongest performance, yielding an overall accuracy of 84.31%, an F1 score of 80%, and an area under the ROC curve of 0.83. These findings indicate that region-specific motor activity patterns extracted from brief, naturalistic point-of-view (POV) recordings may help differentiate preschool children at risk for ADHD-related hyperactivity from their non-hyperactive peers. Importantly, this level of classification performance was obtained using a low-burden, ecologically valid assessment paradigm, highlighting the potential of pose-based behavioral markers to complement traditional symptom-based evaluations in early childhood.</p>
</sec>
<sec id="s4_2">
<title>Comparison with prior work</title>
<p>Hyperactivity is one of the core and most developmentally salient features of Attention-Deficit/Hyperactivity Disorder (ADHD) (<xref ref-type="bibr" rid="B1">1</xref>). In response to the need for more objective assessment approaches, a growing body of research has explored the use of motion-based technologies to quantify hyperactivity-related behavior (<xref ref-type="bibr" rid="B58">58</xref>). Prior studies have employed a range of sensor-based and physiological measurement tools, including wearable motion sensors (<xref ref-type="bibr" rid="B59">59</xref>), accelerometers (<xref ref-type="bibr" rid="B25">25</xref>), IMT devices (<xref ref-type="bibr" rid="B27">27</xref>), and multimodal platforms combining motion sensors with neurophysiological measures such as EEG (<xref ref-type="bibr" rid="B60">60</xref>), to capture motor activity patterns associated with ADHD. While these approaches have demonstrated promising associations with hyperactivity severity, they often require specialized equipment, prolonged monitoring periods, or active task engagement, which may limit feasibility and ecological validity in preschool and classroom settings.</p>
<p>More recently, immersive and technology-enhanced assessment paradigms have been introduced to address some of these limitations. For example, Oh et&#xa0;al. (2024) developed a diagnostic tool, AttnKare-D, which uses Virtual Reality (VR) and Artificial Intelligence (AI) to analyze behavioral data collected from children as they performed a series of everyday cognitive and behavioral tasks in a simulated VR environment. These tasks, such as organizing a room, packing a backpack, and planning a schedule, were designed to assess attention, hyperactivity, and impulsivity in contexts mimicking real-life situations. The analysis revealed that children with ADHD exhibited more frequent and prolonged movements in irrelevant spaces, higher frequencies of touching distracting objects, and more impulsive verbal responses compared to typically developing children. The AI model, which translated this multi-faceted behavioral data into a score based on the 18 DSM-5 ADHD symptoms, achieved an area under the curve (AUC) of 0.893 when compared to diagnoses made by child and adolescent psychiatrists, with a sensitivity of 80% and a specificity of 100% at its optimal cut-off score (<xref ref-type="bibr" rid="B23">23</xref>).</p>
<p>Building on VR-based assessment frameworks, immersive virtual reality paradigms have further incorporated eye-tracking technologies to obtain objective indices of attentional functioning in ADHD. Merzon et&#xa0;al. (2022) proposed an objective, technology-assisted diagnostic approach based on virtual reality (VR) and eye tracking to quantify attentional and executive function patterns in children with ADHD within an ecologically valid setting. Using a head-mounted display with an integrated 90 Hz eye tracker, they collected gaze data while participants performed EPELI (Executive Performance in Everyday Living), a VR task simulating everyday activities. The ADHD group exhibited significantly longer fixation durations, shorter saccade durations, and reduced saccade amplitudes compared to typically developing controls. A support vector machine (SVM) classifier trained solely on eye movement features achieved an AUC of 0.92, significantly outperforming classifiers based only on traditional task performance measures or on eye movements from a conventional visual search task (<xref ref-type="bibr" rid="B60">60</xref>).</p>
<p>In parallel, clinically scalable and unobtrusive device-based approaches have been developed to objectively capture hyperactivity through direct measurement of movement during routine clinical encounters. Chang et&#xa0;al. (2023) proposed an objective, device-based diagnostic method utilizing a smart chair embedded with piezoelectric material to quantify movement patterns in children with ADHD during clinical consultations. The study enrolled 31 children with ADHD and 31 age- and sex-matched controls, who were assessed while seated on the chair during routine outpatient visits. Movement signals were analyzed using variance (Var), zero-crossing rate (ZCR), and high-energy rate (HER) metrics. Results demonstrated that all three movement indices were significantly higher in the ADHD group compared to controls. A support vector machine (SVM) classifier achieved an area under the curve (AUC) of 98.00% using variance alone, indicating excellent discriminative power (<xref ref-type="bibr" rid="B61">61</xref>).</p>
<p>More closely aligned with the present study, recent work has demonstrated that non-contact, vision-based approaches can effectively quantify hyperactivity-related motor behavior in clinical contexts. Ouyang et&#xa0;al. (2024) proposed an objective, non-contact diagnostic framework based on skeleton detection and machine learning to quantify movement patterns in children with ADHD during outpatient consultations. Using OpenPose, they extracted 11 skeletal feature descriptors from 4&#x2013;6 minute video recordings and analyzed movement variability in seated children. Among these features, the single descriptor thigh angle demonstrated the highest discriminative power, achieving an accuracy of 91.03%, sensitivity of 90.25%, specificity of 91.86%, and an AUC of 94.00%. By relying on a standard camera and preserving natural clinical interaction, this approach provides a practical and automated aid for distinguishing ADHD from non-ADHD cases based on objective motor activity (<xref ref-type="bibr" rid="B62">62</xref>).</p>
<p>When we consider our findings in the context of prior work, we observe both overlap and divergence in the movement features identified as most informative. While Ouyang et&#xa0;al. reported variability in thigh angle as a key skeletal feature, we found that distal lower and upper limb regions contributed most strongly to classification performance in our permutation-based feature importance analyses. Importantly, similar regions also showed stronger associations with teacher-rated hyperactivity severity, suggesting convergence between symptom-related behavioral variation and the features driving machine learning classification. At the same time, both studies point to the relevance of lower-limb involvement, indicating that leg-related movement may be an important component of hyperactivity-related motor behavior. We believe that differences in the specific regions highlighted across studies are likely related to contextual factors. Whereas Ouyang et&#xa0;al. examined seated behavior in structured outpatient settings, our paradigm involved children interacting in a familiar school environment during a naturalistic storytelling task. This may have encouraged more spontaneous fidgeting and distal limb movements and, in turn, influenced the relative importance of specific movement features.</p>
<p>Beyond movement-based markers, we note that a growing body of research suggests physiological signals reflecting autonomic regulation may also support ADHD prediction. Recent studies have shown that multimodal physiological features&#x2014;such as electrodermal activity (EDA), heart-rate variability (HRV), and skin temperature&#x2014;can contribute to machine-learning&#x2013;based classification and may provide information that complements overt motor behavior (<xref ref-type="bibr" rid="B63">63</xref>). At the same time, we recognize that rapid advances in flexible, skin-conformal, and low-burden wearable technologies are improving the feasibility of comfortable, longer-term physiological monitoring in everyday environments (<xref ref-type="bibr" rid="B64">64</xref>). These developments may enable future multimodal screening frameworks that integrate behavioral and physiological signals.</p>
<p>A recent comprehensive review on ADHD detection approaches highlighted that objective measurement methods offer promising, cost-effective, and accessible tools for supporting ADHD assessment. Nevertheless, the review emphasized that such methods should be employed with caution, as only a subset of ADHD symptoms&#x2014;primarily those related to hyperactivity&#x2014;can be captured by motion-based measures, and therefore these approaches should not be considered sufficient as standalone diagnostic tools (<xref ref-type="bibr" rid="B65">65</xref>). In the preschool period, however, hyperactivity and impulsivity constitute the most prominent and developmentally salient manifestations of ADHD (<xref ref-type="bibr" rid="B10">10</xref>, <xref ref-type="bibr" rid="B66">66</xref>). Accordingly, objective assessment strategies that prioritize ecologically valid measurement of motor activity may be especially well suited to preschool populations and yield more informative screening outcomes in early childhood ADHD.</p>
<p>Our study extends this growing body of work by addressing several key gaps in the existing literature. Whereas many prior objective assessment approaches have been developed and validated primarily in clinical settings, often involving school-aged children (typically 7 years and older), wearable devices, or highly structured experimental tasks, the present study focuses on preschool-aged children and captures behavior within a familiar educational environment. Previous methods frequently require children to wear sensors, engage with specialized equipment, or perform artificial tasks under test-like conditions, which may alter natural behavior and limit ecological validity&#x2014;particularly in younger populations.</p>
<p>In contrast, our approach leverages a brief, everyday storytelling interaction conducted in the classroom, without requiring the child to wear any devices or attend a clinical setting. By embedding assessment within a naturalistic, socially meaningful activity and using teacher-worn point-of-view video to unobtrusively capture movement, the present method minimizes burden, reduces reactivity, and preserves children&#x2019;s comfort. This design allows objective quantification of hyperactivity-related motor behavior under conditions that closely resemble daily preschool experiences. Such an approach is particularly relevant given that hyperactivity symptoms tend to predominate over inattention during the preschool years, underscoring the potential value of ecologically valid, movement-focused measures for early screening and risk identification in young children.</p>
</sec>
<sec id="s4_3">
<title>Limitations</title>
<p>Several methodological considerations should be taken into account when interpreting the present findings. First, although computer-vision&#x2013;based pose estimation provides an objective and scalable approach to quantifying motor activity, movement indices derived from monocular POV recordings may not perfectly capture children&#x2019;s true motor behavior. Minor measurement inaccuracies related to tracking precision, occlusion, or camera perspective are unavoidable in naturalistic settings and may have introduced noise into the extracted features.</p>
<p>Second, the relatively modest sample size limits generalizability and may have reduced statistical power, particularly for detecting more subtle regional effects. Although nested cross-validation was employed to mitigate optimistic bias in the machine learning analyses, such bias cannot be fully excluded in small-sample settings. In addition, permutation-based feature importance estimates may be unstable under these conditions, and no external validation cohort was available. Replication in larger samples with independent test sets will therefore be essential to establish model stability and clinical relevance. Also, although multiple supervised classifiers were evaluated to provide a broad comparison, not all models are equally well suited to small-sample settings. In particular, instance-based and boosting approaches may exhibit greater variability under limited data conditions. Accordingly, results from these models were considered exploratory. For this reason, primary interpretation was focused on models with more stable and theoretically aligned behavior, such as support vector machines and tree-based ensembles.</p>
<p>Third, behavioral data were collected during a single brief one-on-one interaction, precluding assessment of intra-individual stability or test&#x2013;retest reliability. The findings should therefore be interpreted as reflecting between-group differences at a single time point. Moreover, individual testing may not fully reflect children&#x2019;s typical activity levels in classroom contexts, as some children may modulate their behavior during individualized interactions. Future studies incorporating repeated measurements and group-based classroom recordings would improve ecological validity.</p>
<p>Fourth, several feature-related considerations warrant caution. Because movement features were quantified as cumulative linear displacements in a pelvis-centered coordinate system, the resulting measures are inherently scale-dependent and may be influenced by anthropometric variability, such as limb length. Although the age range was relatively narrow, residual body-size effects cannot be excluded. In addition, root-centered representations emphasize relative segmental motion and may underrepresent large-scale postural translations involving concurrent pelvis displacement. Future work may benefit from incorporating normalization procedures, angular kinematics, or global motion descriptors.</p>
<p>Finally, potential confounding factors were not systematically assessed. Handedness was not formally measured and therefore could not be included as a covariate in the analyses. Because lateralized motor behavior in early childhood is influenced by emerging hand dominance, some left&#x2013;right differences in movement indices may partly reflect individual dominance rather than hyperactivity-related motor patterns. In line with this, inter-regional movement analyses revealed strong bilateral correlations across homologous body segments (<xref ref-type="fig" rid="f4"><bold>Figure&#xa0;4</bold></xref>), suggesting that apparent right-sided differences in feature importance likely reflect shared variance among correlated movement patterns rather than true lateralized motor dominance. Taken together, the pattern of results showed increased movement across multiple body regions and both sides of the body, which is more consistent with a generalized elevation in motor activity. Likewise, transient factors such as sleep quality, nutrition, or emotional state were not controlled. Future studies should include standardized measures of handedness and explicitly examine lateralized movement features, along with other transient influences such as sleep quality, nutrition, or emotional state, to better isolate symptom-related motor behavior.</p>
</sec>
</sec>
<sec id="s5" sec-type="conclusions">
<title>Conclusions</title>
<p>This study demonstrates that movement-based features extracted from teacher-worn point-of-view video recordings during a brief, naturalistic classroom interaction may help differentiate preschool children at risk for ADHD-related hyperactivity from their non-hyperactive peers. By combining ecologically valid data collection with automated pose estimation and machine learning, the proposed approach offers an objective, low-burden, and scalable method for quantifying hyperactivity in early childhood. Although not intended as a standalone diagnostic tool, this framework shows promise as a complementary screening approach that may support early identification efforts in preschool settings. Future studies with larger and more diverse samples are warranted to validate these findings and further explore the clinical and educational utility of POV-based behavioral assessment.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="data-availability">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p></sec>
<sec id="s7" sec-type="ethics-statement">
<title>Ethics statement</title>
<p>The studies involving humans were approved by Non-Interventional Research Ethics Committee of Zonguldak B&#xfc;lent Ecevit University. The studies were conducted in accordance with the local legislation and institutional requirements. Written informed consent for participation in this study was provided by the participants&#x2019; legal guardians/next of kin. Written informed consent was obtained from the individual(s) for the publication of any potentially identifiable images or data included in this article.</p></sec>
<sec id="s8" sec-type="author-contributions">
<title>Author contributions</title>
<p>HK: Investigation, Conceptualization, Funding acquisition, Writing &#x2013; review &amp; editing, Writing &#x2013; original draft, Methodology, Visualization, Data curation. CG: Writing &#x2013; review &amp; editing, Resources, Writing &#x2013; original draft, Formal analysis, Software, Validation, Data curation, Supervision, Investigation.</p></sec>
<ack>
<title>Acknowledgments</title>
<p>The authors gratefully acknowledge the preschool teacher for her assistance with the standardized storytelling sessions and data collection. We also thank the children and their families for their participation in the study.</p>
</ack>
<sec id="s10" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p></sec>
<sec id="s11" sec-type="ai-statement">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p></sec>
<sec id="s12" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p></sec>
<sec id="s13" sec-type="supplementary-material">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fpsyt.2026.1769322/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fpsyt.2026.1769322/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Supplementaryfile1.docx" id="SM1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document"/></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author"><collab>American Psychiatric Association</collab>
</person-group>. 
<article-title>Diagnostic and statistical manual of mental disorders</article-title>. (<year>2022</year>). doi:&#xa0;<pub-id pub-id-type="doi">10.1176/appi.books.9780890425787</pub-id>, PMID: <pub-id pub-id-type="pmid">38300502</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<label>2</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Sayal</surname> <given-names>K</given-names></name>
<name><surname>Prasad</surname> <given-names>V</given-names></name>
<name><surname>Daley</surname> <given-names>D</given-names></name>
<name><surname>Ford</surname> <given-names>T</given-names></name>
<name><surname>Coghill</surname> <given-names>D</given-names></name>
</person-group>. 
<article-title>ADHD in children and young people: Prevalence, care pathways, and service provision</article-title>. <source>Lancet Psychiatry</source>. (<year>2017</year>) <volume>4</volume>:<page-range>175&#x2013;86</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/S2215-0366(17)30167-0</pub-id>, PMID: <pub-id pub-id-type="pmid">29033005</pub-id>
</mixed-citation>
</ref>
<ref id="B3">
<label>3</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Mohammadi</surname> <given-names>MR</given-names></name>
<name><surname>Zarafshan</surname> <given-names>H</given-names></name>
<name><surname>Khaleghi</surname> <given-names>A</given-names></name>
<name><surname>Ahmadi</surname> <given-names>N</given-names></name>
<name><surname>Hooshyari</surname> <given-names>Z</given-names></name>
<name><surname>Mostafavi</surname> <given-names>S</given-names></name>
<etal/>
</person-group>. 
<article-title>Prevalence of ADHD and its comorbidities in a population-based sample</article-title>. <source>J Attn Disord</source>. (<year>2019</year>) <volume>25</volume>:<page-range>1058&#x2013;67</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1177/1087054719886372</pub-id>, PMID: <pub-id pub-id-type="pmid">31833803</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<label>4</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Furman</surname> <given-names>L</given-names></name>
</person-group>. 
<article-title>What is attention-deficit hyperactivity disorder (ADHD)</article-title>? <source>J Child Neurol</source>. (<year>2005</year>) <volume>20</volume>:<fpage>994</fpage>&#x2013;<lpage>1002</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1177/08830738050200121301</pub-id>, PMID: <pub-id pub-id-type="pmid">16417850</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<label>5</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Thomas</surname> <given-names>R</given-names></name>
<name><surname>Sanders</surname> <given-names>S</given-names></name>
<name><surname>Doust</surname> <given-names>J</given-names></name>
<name><surname>Beller</surname> <given-names>E</given-names></name>
<name><surname>Glasziou</surname> <given-names>P</given-names></name>
</person-group>. 
<article-title>Prevalence of attention-deficit/hyperactivity disorder: A systematic review and meta-analysis</article-title>. <source>Pediatrics</source>. (<year>2015</year>) <volume>135</volume>:<fpage>e994</fpage>&#x2013;<lpage>e1001</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1542/peds.2014-3482</pub-id>, PMID: <pub-id pub-id-type="pmid">25733754</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<label>6</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Danielson</surname> <given-names>ML</given-names></name>
<name><surname>Claussen</surname> <given-names>AH</given-names></name>
<name><surname>Bitsko</surname> <given-names>RH</given-names></name>
<name><surname>Katz</surname> <given-names>SJ</given-names></name>
<name><surname>Newsome</surname> <given-names>K</given-names></name>
<name><surname>Blumberg</surname> <given-names>S</given-names></name>
<etal/>
</person-group>. 
<article-title>ADHD prevalence among U.S. children and adolescents in 2022: Diagnosis, severity, co-occurring disorders, and treatment</article-title>. <source>J Clin Child Adolesc Psychol</source>. (<year>2024</year>) <volume>53</volume>:<page-range>343&#x2013;60</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1080/15374416.2024.2335625</pub-id>, PMID: <pub-id pub-id-type="pmid">38778436</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<label>7</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Canals</surname> <given-names>J</given-names></name>
<name><surname>Morales-Hidalgo</surname> <given-names>P</given-names></name>
<name><surname>Jan&#xe9;</surname> <given-names>M</given-names></name>
<name><surname>Dom&#xe9;nech</surname> <given-names>E</given-names></name>
</person-group>. 
<article-title>ADHD prevalence in Spanish preschoolers: Comorbidity, socio-demographic factors, and functional consequences</article-title>. <source>J Atten Disord</source>. (<year>2018</year>) <volume>22</volume>:<page-range>143&#x2013;53</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1177/1087054716638511</pub-id>, PMID: <pub-id pub-id-type="pmid">27009923</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<label>8</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Younis</surname> <given-names>E</given-names></name>
<name><surname>Shalaby</surname> <given-names>S</given-names></name>
<name><surname>Abdo</surname> <given-names>S</given-names></name>
</person-group>. 
<article-title>Screening of attention deficit hyperactivity disorder among preschool children in Gharbia Governorate, Egypt</article-title>. <source>BMC Psychiatry</source>. (<year>2023</year>) <volume>23</volume>:<fpage>472</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s12888-023-04785-x</pub-id>, PMID: <pub-id pub-id-type="pmid">37098572</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<label>9</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Nomura</surname> <given-names>K</given-names></name>
<name><surname>Okada</surname> <given-names>K</given-names></name>
<name><surname>Noujima</surname> <given-names>Y</given-names></name>
<name><surname>Kojima</surname> <given-names>S</given-names></name>
<name><surname>Mori</surname> <given-names>Y</given-names></name>
<name><surname>Amano</surname> <given-names>M</given-names></name>
<etal/>
</person-group>. 
<article-title>A clinical study of attention-deficit/hyperactivity disorder in preschool children&#x2014;Prevalence and differential diagnoses</article-title>. <source>Brain Dev</source>. (<year>2014</year>) <volume>36</volume>:<page-range>778&#x2013;85</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.braindev.2013.11.004</pub-id>, PMID: <pub-id pub-id-type="pmid">24295540</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<label>10</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Molina-Torres</surname> <given-names>J</given-names></name>
<name><surname>Orgil&#xe9;s</surname> <given-names>M</given-names></name>
<name><surname>Servera</surname> <given-names>M</given-names></name>
</person-group>. 
<article-title>El TDAH en la etapa preescolar: Una revisi&#xf3;n narrativa</article-title>. <source>Rev Psicolog&#xed;a Cl&#xed;nica con Ni&#xf1;os y Adolescentes</source>. (<year>2022</year>) <volume>9</volume>:<fpage>1</fpage>&#x2013;<lpage>10</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.21134/rpcna.2022.09.3.5</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<label>11</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wolraich</surname> <given-names>ML</given-names></name>
<name><surname>Hagan</surname> <given-names>JF</given-names></name>
<name><surname>Allan</surname> <given-names>C</given-names></name>
<name><surname>Chan</surname> <given-names>E</given-names></name>
<name><surname>Davison</surname> <given-names>D</given-names></name>
<name><surname>Earls</surname> <given-names>M</given-names></name>
<etal/>
</person-group>. 
<article-title>Clinical practice guideline for the diagnosis, evaluation, and treatment of attention-deficit/hyperactivity disorder in children and adolescents</article-title>. <source>Pediatrics</source>. (<year>2019</year>) <volume>144</volume>:<fpage>e20192528</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1542/peds.2019-2528</pub-id>, PMID: <pub-id pub-id-type="pmid">31570648</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<label>12</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>DuPaul</surname> <given-names>GJ</given-names></name>
<name><surname>McGoey</surname> <given-names>KE</given-names></name>
<name><surname>Eckert</surname> <given-names>TL</given-names></name>
<name><surname>VanBrakle</surname> <given-names>J</given-names></name>
</person-group>. 
<article-title>Preschool children with attention-deficit/hyperactivity disorder: Impairments in behavioral, social, and school functioning</article-title>. <source>J Am Acad Child Adolesc Psychiatry</source>. (<year>2001</year>) <volume>40</volume>:<page-range>508&#x2013;15</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1097/00004583-200105000-00009</pub-id>, PMID: <pub-id pub-id-type="pmid">11349694</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<label>13</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Conners</surname> <given-names>CK</given-names></name>
<name><surname>Sitarenios</surname> <given-names>G</given-names></name>
<name><surname>Parker</surname> <given-names>JDA</given-names></name>
<name><surname>Epstein</surname> <given-names>JN</given-names></name>
</person-group>. 
<article-title>Revision and restandardization of the Conners Teacher Rating Scale (CTRS-R): Factor structure, reliability, and criterion validity</article-title>. <source>J Abnorm Child Psychol</source>. (<year>1998</year>) <volume>26</volume>:<page-range>279&#x2013;91</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1023/A:1022606501530</pub-id>, PMID: <pub-id pub-id-type="pmid">9700520</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<label>14</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>McGoey</surname> <given-names>KE</given-names></name>
<name><surname>DuPaul</surname> <given-names>GJ</given-names></name>
<name><surname>Haley</surname> <given-names>E</given-names></name>
<name><surname>Shelton</surname> <given-names>T</given-names></name>
</person-group>. 
<article-title>Parent and teacher ratings of attention-deficit/hyperactivity disorder in preschool: The ADHD Rating Scale-IV Preschool Version</article-title>. <source>J Psychopathol Behav Assess</source>. (<year>2007</year>) <volume>29</volume>:<page-range>269&#x2013;76</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s10862-007-9048-y</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<label>15</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Overgaard</surname> <given-names>KR</given-names></name>
<name><surname>Oerbeck</surname> <given-names>B</given-names></name>
<name><surname>Friis</surname> <given-names>S</given-names></name>
<name><surname>Biele</surname> <given-names>G</given-names></name>
<name><surname>Pripp</surname> <given-names>AH</given-names></name>
<name><surname>Aase</surname> <given-names>H</given-names></name>
<etal/>
</person-group>. 
<article-title>Screening with an ADHD-specific rating scale in preschoolers: A cross-cultural comparison of the Early Childhood Inventory-4</article-title>. <source>Psychol Assess</source>. (<year>2019</year>) <volume>31</volume>:<page-range>678&#x2013;88</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1037/pas0000722</pub-id>, PMID: <pub-id pub-id-type="pmid">30958025</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<label>16</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Kooij</surname> <given-names>JJS</given-names></name>
<name><surname>Bijlenga</surname> <given-names>D</given-names></name>
<name><surname>Salerno</surname> <given-names>L</given-names></name>
<name><surname>Jaeschke</surname> <given-names>R</given-names></name>
<name><surname>Bitter</surname> <given-names>I</given-names></name>
<name><surname>Bal&#xe1;zs</surname> <given-names>J</given-names></name>
<etal/>
</person-group>. 
<article-title>Updated European Consensus Statement on diagnosis and treatment of adult ADHD</article-title>. <source>Eur Psychiatry</source>. (<year>2019</year>) <volume>56</volume>:<fpage>14</fpage>&#x2013;<lpage>34</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.eurpsy.2018.11.001</pub-id>, PMID: <pub-id pub-id-type="pmid">30453134</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<label>17</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Rodr&#xed;guez</surname> <given-names>C</given-names></name>
<name><surname>Areces</surname> <given-names>D</given-names></name>
<name><surname>Garc&#xed;a</surname> <given-names>T</given-names></name>
<name><surname>Cueli</surname> <given-names>M</given-names></name>
<name><surname>Gonz&#xe1;lez-Castro</surname> <given-names>P</given-names></name>
</person-group>. 
<article-title>Comparison between two continuous performance tests for identifying ADHD: Traditional vs. virtual reality</article-title>. <source>Int J Clin Health Psychol</source>. (<year>2018</year>) <volume>18</volume>:<page-range>254&#x2013;63</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ijchp.2018.06.003</pub-id>, PMID: <pub-id pub-id-type="pmid">30487931</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<label>18</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Breaux</surname> <given-names>RP</given-names></name>
<name><surname>Griffith</surname> <given-names>SF</given-names></name>
<name><surname>Harvey</surname> <given-names>EA</given-names></name>
</person-group>. 
<article-title>Preschool neuropsychological measures as predictors of later attention deficit hyperactivity disorder</article-title>. <source>J Abnorm Child Psychol</source>. (<year>2016</year>) <volume>44</volume>:<page-range>1455&#x2013;71</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s10802-016-0140-1</pub-id>, PMID: <pub-id pub-id-type="pmid">26936037</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<label>19</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>&#xd6;ztekin</surname> <given-names>I</given-names></name>
<name><surname>Finlayson</surname> <given-names>MA</given-names></name>
<name><surname>Graziano</surname> <given-names>PA</given-names></name>
<name><surname>Dick</surname> <given-names>AS</given-names></name>
</person-group>. 
<article-title>Is there any incremental benefit to conducting neuroimaging and neurocognitive assessments in the diagnosis of ADHD in young children? A machine learning investigation</article-title>. <source>Dev Cognit Neurosci</source>. (<year>2021</year>) <volume>49</volume>:<elocation-id>100966</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.dcn.2021.100966</pub-id>, PMID: <pub-id pub-id-type="pmid">34044207</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<label>20</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Beriha</surname> <given-names>SS</given-names></name>
</person-group>. 
<article-title>Computer aided diagnosis system to distinguish ADHD from similar behavioral disorders</article-title>. <source>Biomed Pharmacol J</source>. (<year>2018</year>) <volume>11</volume>:<page-range>1135&#x2013;41</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.13005/bpj/1474</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<label>21</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Markovska-Simoska</surname> <given-names>S</given-names></name>
<name><surname>Pop-Jordanova</surname> <given-names>N</given-names></name>
</person-group>. 
<article-title>Quantitative EEG in children and adults with attention deficit hyperactivity disorder: comparison of absolute and relative power spectra and theta/beta ratio</article-title>. <source>Clin EEG Neurosci</source>. (<year>2017</year>) <volume>48</volume>:<fpage>20</fpage>&#x2013;<lpage>32</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1177/1550059416643824</pub-id>, PMID: <pub-id pub-id-type="pmid">27170672</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<label>22</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ta&#x15f;p&#x131;nar</surname> <given-names>G</given-names></name>
<name><surname>Ozkurt</surname> <given-names>N</given-names></name>
</person-group>. 
<article-title>A review of ADHD detection studies with machine learning methods using rsfMRI data</article-title>. <source>NMR Biomedicine</source>. (<year>2024</year>) <volume>37</volume>:<fpage>e5138</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/nbm.5138</pub-id>, PMID: <pub-id pub-id-type="pmid">38472163</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<label>23</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Oh</surname> <given-names>S</given-names></name>
<name><surname>Joung</surname> <given-names>YS</given-names></name>
<name><surname>Chung</surname> <given-names>TM</given-names></name>
<name><surname>Lee</surname> <given-names>J</given-names></name>
<name><surname>Seok</surname> <given-names>BJ</given-names></name>
<name><surname>Kim</surname> <given-names>N</given-names></name>
<etal/>
</person-group>. 
<article-title>Diagnosis of ADHD using virtual reality and artificial intelligence: an exploratory study of clinical applications</article-title>. <source>Front Psychiatry</source>. (<year>2024</year>) <volume>15</volume>:<elocation-id>1383547</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpsyt.2024.1383547</pub-id>, PMID: <pub-id pub-id-type="pmid">38887727</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<label>24</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Peterson</surname> <given-names>B</given-names></name>
<name><surname>Trampush</surname> <given-names>JW</given-names></name>
<name><surname>Brown</surname> <given-names>M</given-names></name>
<name><surname>Maglione</surname> <given-names>M</given-names></name>
<name><surname>Bolshakova</surname> <given-names>M</given-names></name>
<name><surname>Rozelle</surname> <given-names>M</given-names></name>
<etal/>
</person-group>. 
<article-title>Tools for the diagnosis of ADHD in children and adolescents: A systematic review</article-title>. <source>Pediatrics</source>. (<year>2024</year>) <volume>153</volume>:<fpage>e2024065854</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1542/peds.2024-065854</pub-id>, PMID: <pub-id pub-id-type="pmid">38523599</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<label>25</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Welk</surname> <given-names>GJ</given-names></name>
<name><surname>Schaben</surname> <given-names>JA</given-names></name>
<name><surname>Morrow</surname> <given-names>JR</given-names> <suffix>Jr</suffix></name>
</person-group>. 
<article-title>Reliability of accelerometry-based activity monitors: a generalizability study</article-title>. <source>Med Sci Sports Exercise</source>. (<year>2004</year>) <volume>36</volume>:<page-range>1637&#x2013;45</page-range>.
</mixed-citation>
</ref>
<ref id="B26">
<label>26</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Trost</surname> <given-names>SG</given-names></name>
<name><surname>Pate</surname> <given-names>RR</given-names></name>
<name><surname>Freedson</surname> <given-names>PS</given-names></name>
<name><surname>Sallis</surname> <given-names>JF</given-names></name>
<name><surname>Taylor</surname> <given-names>WC</given-names></name>
</person-group>. 
<article-title>Using objective physical activity measures with youth: How many days of monitoring are needed</article-title>? <source>Med Sci Sports Exercise</source>. (<year>2000</year>) <volume>32</volume>:<page-range>426&#x2013;31</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1097/00005768-200002000-00025</pub-id>, PMID: <pub-id pub-id-type="pmid">10694127</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<label>27</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Teicher</surname> <given-names>MH</given-names></name>
<name><surname>Ito</surname> <given-names>Y</given-names></name>
<name><surname>Glod</surname> <given-names>CA</given-names></name>
<name><surname>Barber</surname> <given-names>NI</given-names></name>
</person-group>. 
<article-title>Objective measurement of hyperactivity and attentional problems in ADHD</article-title>. <source>J Am Acad Child Adolesc Psychiatry</source>. (<year>1996</year>) <volume>35</volume>:<page-range>334&#x2013;42</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1097/00004583-199603000-00015</pub-id>, PMID: <pub-id pub-id-type="pmid">8714322</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<label>28</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wehrmann</surname> <given-names>T</given-names></name>
<name><surname>M&#xfc;ller</surname> <given-names>JM</given-names></name>
</person-group>. 
<article-title>An objective measure of hyperactivity aspects with compressed webcam video</article-title>. <source>Child Adolesc Psychiatry Ment Health</source>. (<year>2015</year>) <volume>9</volume>:<fpage>45</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s13034-015-0076-1</pub-id>, PMID: <pub-id pub-id-type="pmid">26361496</pub-id>
</mixed-citation>
</ref>
<ref id="B29">
<label>29</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Chiu</surname> <given-names>YH</given-names></name>
<name><surname>Lee</surname> <given-names>YH</given-names></name>
<name><surname>Wang</surname> <given-names>SY</given-names></name>
</person-group>. 
<article-title>Objective approach to diagnosing attention deficit hyperactivity disorder by using pixel subtraction and machine learning classification of outpatient consultation videos</article-title>. <source>J Neurodev Disord</source>. (<year>2024</year>) <volume>16</volume>:<fpage>71</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s11689-024-09588-z</pub-id>, PMID: <pub-id pub-id-type="pmid">39716052</pub-id>
</mixed-citation>
</ref>
<ref id="B30">
<label>30</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Daniels</surname> <given-names>J</given-names></name>
<name><surname>Schwartz</surname> <given-names>J</given-names></name>
<name><surname>Voss</surname> <given-names>C</given-names></name>
<name><surname>Haber</surname> <given-names>N</given-names></name>
<name><surname>Fazel</surname> <given-names>A</given-names></name>
<name><surname>Kline</surname> <given-names>A</given-names></name>
<etal/>
</person-group>. 
<article-title>Exploratory study examining the at-home feasibility of a wearable tool for social-affective learning in children with autism</article-title>. <source>NPJ Digital Med</source>. (<year>2018</year>) <volume>1</volume>:<fpage>32</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41746-018-0035-3</pub-id>, PMID: <pub-id pub-id-type="pmid">31304314</pub-id>
</mixed-citation>
</ref>
<ref id="B31">
<label>31</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Kouo</surname> <given-names>J</given-names></name>
</person-group>. 
<article-title>The effectiveness of a packaged intervention including point-of-view video modeling in teaching social initiation skills to children with autism spectrum disorders</article-title>. <source>Focus Autism Other Dev Disabil</source>. (<year>2019</year>) <volume>34</volume>:<page-range>141&#x2013;52</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1177/1088357618815887</pub-id>
</mixed-citation>
</ref>
<ref id="B32">
<label>32</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Hine</surname> <given-names>J</given-names></name>
<name><surname>Wolery</surname> <given-names>M</given-names></name>
</person-group>. 
<article-title>Using point-of-view video modeling to teach play to preschoolers with autism</article-title>. <source>Topics Early Childhood Special Educ</source>. (<year>2006</year>) <volume>26</volume>:<fpage>83</fpage>&#x2013;<lpage>93</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1177/02711214060260020301</pub-id>
</mixed-citation>
</ref>
<ref id="B33">
<label>33</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Edmunds</surname> <given-names>SR</given-names></name>
<name><surname>Rozga</surname> <given-names>A</given-names></name>
<name><surname>Li</surname> <given-names>Y</given-names></name>
<name><surname>Karp</surname> <given-names>EA</given-names></name>
<name><surname>Ibanez</surname> <given-names>LV</given-names></name>
<name><surname>Rehg</surname> <given-names>JM</given-names></name>
<etal/>
</person-group>. 
<article-title>Brief report: using a point-of-view camera to measure eye gaze in young children with autism spectrum disorder during naturalistic social interactions: a pilot study</article-title>. <source>J Autism Dev Disord</source>. (<year>2017</year>) <volume>47</volume>:<fpage>898</fpage>&#x2013;<lpage>904</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s10803-016-3002-3</pub-id>, PMID: <pub-id pub-id-type="pmid">28070783</pub-id>
</mixed-citation>
</ref>
<ref id="B34">
<label>34</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ahn</surname> <given-names>YA</given-names></name>
<name><surname>Moffitt</surname> <given-names>JM</given-names></name>
<name><surname>Tao</surname> <given-names>Y</given-names></name>
<name><surname>Custode</surname> <given-names>S</given-names></name>
<name><surname>Parlade</surname> <given-names>M</given-names></name>
<name><surname>Beaumont</surname> <given-names>A</given-names></name>
<etal/>
</person-group>. 
<article-title>Objective measurement of social gaze and smile behaviors in children with suspected autism spectrum disorder during administration of the autism diagnostic observation schedule, 2nd edition</article-title>. <source>J Autism Dev Disord</source>. (<year>2024</year>) <volume>54</volume>:<page-range>2124&#x2013;37</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s10803-023-05990-z</pub-id>, PMID: <pub-id pub-id-type="pmid">37103660</pub-id>
</mixed-citation>
</ref>
<ref id="B35">
<label>35</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Kay&#x131;&#x15f;</surname> <given-names>H</given-names></name>
<name><surname>&#xc7;elik</surname> <given-names>M</given-names></name>
<name><surname>Gedizlio&#x11f;lu</surname> <given-names>&#xc7;</given-names></name>
<name><surname>Kay&#x131;&#x15f;</surname> <given-names>E</given-names></name>
<name><surname>Aydemir</surname> <given-names>C</given-names></name>
<name><surname>Hatipo&#x11f;lu</surname> <given-names>A</given-names></name>
<etal/>
</person-group>. 
<article-title>A new approach in autism diagnosis: Evaluating natural interaction using point of view (POV) glasses</article-title>. <source>Asian J Psychiatry</source>. (<year>2026</year>) <volume>116</volume>:<elocation-id>104798</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ajp.2025.104798</pub-id>, PMID: <pub-id pub-id-type="pmid">41456454</pub-id>
</mixed-citation>
</ref>
<ref id="B36">
<label>36</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Kay&#x131;&#x15f;</surname> <given-names>H</given-names></name>
<name><surname>&#xc7;elik</surname> <given-names>M</given-names></name>
<name><surname>&#xc7;ak&#x131;r Karde&#x15f;</surname> <given-names>V</given-names></name>
<name><surname>Karabulut</surname> <given-names>HA</given-names></name>
<name><surname>&#xd6;zkan</surname> <given-names>E</given-names></name>
<name><surname>Gedizlio&#x11f;lu</surname> <given-names>&#xc7;</given-names></name>
<etal/>
</person-group>. 
<article-title>A novel approach to depression detection using POV glasses and machine learning for multimodal analysis</article-title>. <source>Front Psychiatry</source>. (<year>2025</year>) <volume>16</volume>:<elocation-id>1720990</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fpsyt.2025.1720990</pub-id>, PMID: <pub-id pub-id-type="pmid">41293203</pub-id>
</mixed-citation>
</ref>
<ref id="B37">
<label>37</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Fantuzzo</surname> <given-names>J</given-names></name>
<name><surname>Grim</surname> <given-names>S</given-names></name>
<name><surname>Mordell</surname> <given-names>M</given-names></name>
<name><surname>McDermott</surname> <given-names>P</given-names></name>
<name><surname>Miller</surname> <given-names>L</given-names></name>
<name><surname>Coolahan</surname> <given-names>K</given-names></name>
</person-group>. 
<article-title>A multivariate analysis of the Revised Conners&#x2019; Teacher Rating Scale with low-income, urban preschool children</article-title>. <source>J Abnorm Child Psychol</source>. (<year>2001</year>) <volume>29</volume>:<page-range>141&#x2013;52</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1023/A:1005236113655</pub-id>, PMID: <pub-id pub-id-type="pmid">11321629</pub-id>
</mixed-citation>
</ref>
<ref id="B38">
<label>38</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Gerhardstein</surname> <given-names>RR</given-names></name>
<name><surname>Lonigan</surname> <given-names>CJ</given-names></name>
</person-group>. 
<article-title>Factor structure of the Conners&#x2019; Teacher Rating Scale-Short Form in a low-income preschool sample</article-title>. <source>J Psychoeduc Assess</source>. (<year>2003</year>) <volume>21</volume>:<page-range>223&#x2013;43</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1177/073428290302100301</pub-id>
</mixed-citation>
</ref>
<ref id="B39">
<label>39</label>
<mixed-citation publication-type="web">
<person-group person-group-type="author">
<name><surname>Kaner</surname> <given-names>S</given-names></name>
<name><surname>B&#xfc;y&#xfc;k&#xf6;zt&#xfc;rk</surname> <given-names>&#x15e;</given-names></name>
<name><surname>&#x130;&#x15f;eri</surname> <given-names>E</given-names></name>
</person-group>. 
<article-title>Conners &#xf6;&#x11f;retmen dereceleme &#xf6;l&#xe7;e&#x11f;i-yenilenmi&#x15f; k&#x131;sa: T&#xfc;rkiye uyarlama &#xe7;al&#x131;&#x15f;mas&#x131;</article-title> (<year>2013</year>). Available online at: <uri xlink:href="http://hdl.handle.net/11129/2594">http://hdl.handle.net/11129/2594</uri> (Accessed <date-in-citation content-type="access-date">April 25, 2025</date-in-citation>).
</mixed-citation>
</ref>
<ref id="B40">
<label>40</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<collab>Milli E&#x11f;itim Bakanl&#x131;&#x11f;&#x131;</collab>
</person-group>. 
<article-title>Yolculuk oyunu</article-title>. <source>MEB Yay&#x131;nlar&#x131;</source>. (<year>2023</year>).
</mixed-citation>
</ref>
<ref id="B41">
<label>41</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Welch</surname> <given-names>G</given-names></name>
<name><surname>Bishop</surname> <given-names>G</given-names></name>
</person-group>. 
<article-title>An introduction to the Kalman filter</article-title>. (<year>1995</year>).
</mixed-citation>
</ref>
<ref id="B42">
<label>42</label>
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Casiez</surname> <given-names>G</given-names></name>
<name><surname>Roussel</surname> <given-names>N</given-names></name>
<name><surname>Vogel</surname> <given-names>D</given-names></name>
</person-group>. (<year>2012</year>). 
<article-title>1&#x20ac; filter: a simple speed-based low-pass filter for noisy input in interactive systems</article-title>, in: <conf-name>Proceedings of the SIGCHI Conference on Human Factors in Computing Systems</conf-name>, <publisher-loc>New York, NY</publisher-loc>: 
<publisher-name>ACM</publisher-name>. pp. <page-range>2527&#x2013;30</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1145/2207676.2208639</pub-id>
</mixed-citation>
</ref>
<ref id="B43">
<label>43</label>
<mixed-citation publication-type="web">
<person-group person-group-type="author">
<name><surname>Google</surname> <given-names>AI</given-names></name>
</person-group>. 
<article-title>Pose landmark detection guide</article-title> (<year>2024</year>). Available online at: <uri xlink:href="https://ai.google.dev/edge/mediapipe/solutions/vision/pose_landmarker">https://ai.google.dev/edge/mediapipe/solutions/vision/pose_landmarker</uri> (Accessed <date-in-citation content-type="access-date">November 2, 2025</date-in-citation>).
</mixed-citation>
</ref>
<ref id="B44">
<label>44</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wagh</surname> <given-names>V</given-names></name>
<name><surname>Scott</surname> <given-names>MW</given-names></name>
<name><surname>Kraeutner</surname> <given-names>S</given-names></name>
</person-group>. 
<article-title>Quantifying similarities between MediaPipe and a known standard for tracking 2D hand trajectories</article-title>. <source>bioRxiv</source>. (<year>2023</year>). doi:&#xa0;<pub-id pub-id-type="doi">10.1101/2023.11.21.568085</pub-id>, PMID: <pub-id pub-id-type="pmid">41723302</pub-id>
</mixed-citation>
</ref>
<ref id="B45">
<label>45</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>De Gusm&#xe3;o Lafayette</surname> <given-names>T</given-names></name>
<name><surname>De Lima Kunst</surname> <given-names>V</given-names></name>
<name><surname>De Sousa Melo</surname> <given-names>P</given-names></name>
<name><surname>De Oliveira Guedes</surname> <given-names>P</given-names></name>
<name><surname>Teixeira</surname> <given-names>J</given-names></name>
<name><surname>Vasconcelos</surname> <given-names>C</given-names></name>
<etal/>
</person-group>. 
<article-title>Validation of angle estimation based on body tracking data from RGB-D and RGB cameras for biomechanical assessment</article-title>. <source>Sensors</source>. (<year>2022</year>) <volume>23</volume>:<elocation-id>3</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/s23010003</pub-id>, PMID: <pub-id pub-id-type="pmid">36616603</pub-id>
</mixed-citation>
</ref>
<ref id="B46">
<label>46</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Jayavel</surname> <given-names>P</given-names></name>
<name><surname>Srinivasan</surname> <given-names>H</given-names></name>
<name><surname>Karthik</surname> <given-names>V</given-names></name>
<name><surname>Fouly</surname> <given-names>A</given-names></name>
<name><surname>Devaraj</surname> <given-names>A</given-names></name>
</person-group>. 
<article-title>Human upper limb kinematics using a novel algorithm in post-stroke patients</article-title>. <source>Proc Inst Mech Eng Part H: J Eng Med</source>. (<year>2025</year>) <volume>239</volume>:<fpage>48</fpage>&#x2013;<lpage>55</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1177/09544119251315421</pub-id>, PMID: <pub-id pub-id-type="pmid">39866064</pub-id>
</mixed-citation>
</ref>
<ref id="B47">
<label>47</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ali</surname> <given-names>MM</given-names></name>
<name><surname>Mohamed</surname> <given-names>SI</given-names></name>
</person-group>. 
<article-title>A pose estimation for motion tracking of infants cerebral palsy</article-title>. <source>Multimedia Tools Appl</source>. (<year>2025</year>) <volume>84</volume>:<page-range>8261&#x2013;86</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s11042-024-19198-5</pub-id>
</mixed-citation>
</ref>
<ref id="B48">
<label>48</label>
<mixed-citation publication-type="confproc">
<person-group person-group-type="author">
<name><surname>Patil</surname> <given-names>VK</given-names></name>
<name><surname>Dhamange</surname> <given-names>S</given-names></name>
<name><surname>Bhandurge</surname> <given-names>S</given-names></name>
<name><surname>Gaikwad</surname> <given-names>S</given-names></name>
<name><surname>Patil</surname> <given-names>A</given-names></name>
</person-group>. 
<article-title>Dynamic human activity recognition with vision-based pose estimation and machine learning for various age groups</article-title>, in: <source>2024 International Conference on Emerging Smart Computing and Informatics (ESCI)</source>, <publisher-loc>Piscataway, NJ</publisher-loc>: 
<publisher-name>IEEE</publisher-name>. (<year>2024</year>). <fpage>1</fpage>&#x2013;<lpage>6</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1109/ESCI59607.2024.10497259</pub-id>
</mixed-citation>
</ref>
<ref id="B49">
<label>49</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Clemente</surname> <given-names>C</given-names></name>
<name><surname>Chambel</surname> <given-names>G</given-names></name>
<name><surname>Silva</surname> <given-names>DC</given-names></name>
<name><surname>Montes</surname> <given-names>AM</given-names></name>
<name><surname>Pinto</surname> <given-names>JF</given-names></name>
<name><surname>Silva</surname> <given-names>HPD</given-names></name>
</person-group>. 
<article-title>Feasibility of 3D body tracking from monocular 2D video feeds in musculoskeletal telerehabilitation</article-title>. <source>Sensors</source>. (<year>2023</year>) <volume>24</volume>:<elocation-id>206</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/s24010206</pub-id>, PMID: <pub-id pub-id-type="pmid">38203068</pub-id>
</mixed-citation>
</ref>
<ref id="B50">
<label>50</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Zhao</surname> <given-names>Z</given-names></name>
<name><surname>Zhu</surname> <given-names>Z</given-names></name>
<name><surname>Zhang</surname> <given-names>X</given-names></name>
<name><surname>Tang</surname> <given-names>H</given-names></name>
<name><surname>Xing</surname> <given-names>J</given-names></name>
<name><surname>Hu</surname> <given-names>X</given-names></name>
<etal/>
</person-group>. 
<article-title>Atypical head movement during face-to-face interaction in children with autism spectrum disorder</article-title>. <source>Autism Res</source>. (<year>2021</year>) <volume>14</volume>:<page-range>1197&#x2013;208</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/aur.2478</pub-id>, PMID: <pub-id pub-id-type="pmid">33529500</pub-id>
</mixed-citation>
</ref>
<ref id="B51">
<label>51</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Memon</surname> <given-names>A</given-names></name>
<name><surname>Arain</surname> <given-names>Q</given-names></name>
<name><surname>Pirzada</surname> <given-names>N</given-names></name>
<name><surname>Shaikh</surname> <given-names>A</given-names></name>
<name><surname>Sulaiman</surname> <given-names>A</given-names></name>
<name><surname>Al Reshan</surname> <given-names>MS</given-names></name>
<etal/>
</person-group>. 
<article-title>Prior-free 3D human pose estimation in a video using limb-vectors</article-title>. <source>ICT Express</source>. (<year>2024</year>) <volume>10</volume>:<page-range>1266&#x2013;72</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.icte.2024.09.015</pub-id>
</mixed-citation>
</ref>
<ref id="B52">
<label>52</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Kim</surname> <given-names>JW</given-names></name>
<name><surname>Choi</surname> <given-names>JY</given-names></name>
<name><surname>Ha</surname> <given-names>EJ</given-names></name>
<name><surname>Choi</surname> <given-names>JH</given-names></name>
</person-group>. 
<article-title>Human pose estimation using mediapipe pose and optimization method based on a humanoid model</article-title>. <source>Appl Sci</source>. (<year>2023</year>) <volume>13</volume>:<elocation-id>2700</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3390/app13042700</pub-id>
</mixed-citation>
</ref>
<ref id="B53">
<label>53</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Neupane</surname> <given-names>RB</given-names></name>
<name><surname>Li</surname> <given-names>K</given-names></name>
<name><surname>Boka</surname> <given-names>TF</given-names></name>
</person-group>. 
<article-title>A survey on deep 3D human pose estimation</article-title>. <source>Artif Intell Rev</source>. (<year>2024</year>) <volume>58</volume>:<elocation-id>24</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s10462-024-11019-3</pub-id>
</mixed-citation>
</ref>
<ref id="B54">
<label>54</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Breiman</surname> <given-names>L</given-names></name>
</person-group>. 
<article-title>Random forests</article-title>. <source>Mach Learn</source>. (<year>2001</year>) <volume>45</volume>:<fpage>5</fpage>&#x2013;<lpage>32</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1023/A:1010933404324</pub-id>
</mixed-citation>
</ref>
<ref id="B55">
<label>55</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Geurts</surname> <given-names>P</given-names></name>
<name><surname>Ernst</surname> <given-names>D</given-names></name>
<name><surname>Wehenkel</surname> <given-names>L</given-names></name>
</person-group>. 
<article-title>Extremely randomized trees</article-title>. <source>Mach Learn</source>. (<year>2006</year>) <volume>63</volume>:<fpage>3</fpage>&#x2013;<lpage>42</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s10994-006-6226-1</pub-id>
</mixed-citation>
</ref>
<ref id="B56">
<label>56</label>
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name><surname>Freund</surname> <given-names>Y</given-names></name>
<name><surname>Schapire</surname> <given-names>RE</given-names></name>
</person-group>. 
<article-title>A desicion-theoretic generalization of on-line learning and an application to boosting</article-title>. In: <source>European conference on computational learning theory</source>. 
<publisher-name>Springer Berlin Heidelberg</publisher-name>, <publisher-loc>Berlin, Heidelberg</publisher-loc> (<year>1995</year>). doi:&#xa0;<pub-id pub-id-type="doi">10.1007/3-540-59119-2_166</pub-id>
</mixed-citation>
</ref>
<ref id="B57">
<label>57</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Hanley</surname> <given-names>JA</given-names></name>
<name><surname>McNeil</surname> <given-names>BJ</given-names></name>
</person-group>. 
<article-title>The meaning and use of the area under a receiver operating characteristic (ROC) curve</article-title>. <source>Radiology</source>. (<year>1982</year>) <volume>143</volume>:<fpage>29</fpage>&#x2013;<lpage>36</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1148/radiology.143.1.7063747</pub-id>, PMID: <pub-id pub-id-type="pmid">7063747</pub-id>
</mixed-citation>
</ref>
<ref id="B58">
<label>58</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Lindhiem</surname> <given-names>O</given-names></name>
<name><surname>Goel</surname> <given-names>M</given-names></name>
<name><surname>Shaaban</surname> <given-names>S</given-names></name>
<name><surname>Mak</surname> <given-names>KJ</given-names></name>
<name><surname>Chikersal</surname> <given-names>P</given-names></name>
<name><surname>Feldman</surname> <given-names>J</given-names></name>
<etal/>
</person-group>. 
<article-title>Objective measurement of hyperactivity using mobile sensing and machine learning: Pilot study</article-title>. <source>JMIR Formative Res</source>. (<year>2022</year>) <volume>6</volume>:<fpage>e35803</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.2196/35803</pub-id>, PMID: <pub-id pub-id-type="pmid">35468089</pub-id>
</mixed-citation>
</ref>
<ref id="B59">
<label>59</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Bhattacharyya</surname> <given-names>N</given-names></name>
<name><surname>Singh</surname> <given-names>S</given-names></name>
<name><surname>Banerjee</surname> <given-names>A</given-names></name>
<name><surname>Ghosh</surname> <given-names>R</given-names></name>
<name><surname>Sinha</surname> <given-names>O</given-names></name>
<name><surname>Das</surname> <given-names>N</given-names></name>
<etal/>
</person-group>. 
<article-title>Integration of electroencephalogram (EEG) and motion tracking sensors for objective measure of attention-deficit hyperactivity disorder (MAHD) in pre-schoolers</article-title>. <source>Rev Sci Instrum</source>. (<year>2022</year>) <volume>93</volume>. doi:&#xa0;<pub-id pub-id-type="doi">10.1063/5.0088044</pub-id>, PMID: <pub-id pub-id-type="pmid">35649790</pub-id>
</mixed-citation>
</ref>
<ref id="B60">
<label>60</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Merzon</surname> <given-names>L</given-names></name>
<name><surname>Pettersson</surname> <given-names>K</given-names></name>
<name><surname>Aronen</surname> <given-names>ET</given-names></name>
<name><surname>Huhdanp&#xe4;&#xe4;</surname> <given-names>H</given-names></name>
<name><surname>Seesj&#xe4;rvi</surname> <given-names>E</given-names></name>
<name><surname>Henriksson</surname> <given-names>L</given-names></name>
<etal/>
</person-group>. 
<article-title>Eye movement behavior in a real-world virtual reality task reveals ADHD in children</article-title>. <source>Sci Rep</source>. (<year>2022</year>) <volume>12</volume>:<fpage>20308</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-022-24552-4</pub-id>, PMID: <pub-id pub-id-type="pmid">36434040</pub-id>
</mixed-citation>
</ref>
<ref id="B61">
<label>61</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Chang</surname> <given-names>TM</given-names></name>
<name><surname>Wu</surname> <given-names>RC</given-names></name>
<name><surname>Yang</surname> <given-names>RC</given-names></name>
<name><surname>Chiang</surname> <given-names>CT</given-names></name>
<name><surname>Chiu</surname> <given-names>YH</given-names></name>
<name><surname>Ouyang</surname> <given-names>CS</given-names></name>
<etal/>
</person-group>. 
<article-title>Objective diagnosis of ADHD through movement analysis by using a smart chair with piezoelectric material</article-title>. <source>Pediatr Neonatol</source>. (<year>2023</year>) <volume>64</volume>:<fpage>46</fpage>&#x2013;<lpage>52</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.pedneo.2022.06.007</pub-id>, PMID: <pub-id pub-id-type="pmid">36089537</pub-id>
</mixed-citation>
</ref>
<ref id="B62">
<label>62</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Ouyang</surname> <given-names>CS</given-names></name>
<name><surname>Yang</surname> <given-names>RC</given-names></name>
<name><surname>Wu</surname> <given-names>RC</given-names></name>
<name><surname>Chiang</surname> <given-names>CT</given-names></name>
<name><surname>Chiu</surname> <given-names>YH</given-names></name>
<name><surname>Lin</surname> <given-names>LC</given-names></name>
</person-group>. 
<article-title>Objective and automatic assessment approach for diagnosing attention-deficit/hyperactivity disorder based on skeleton detection and classification analysis in outpatient videos</article-title>. <source>Child Adolesc Psychiatry Ment Health</source>. (<year>2024</year>) <volume>18</volume>:<fpage>60</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s13034-024-00749-5</pub-id>, PMID: <pub-id pub-id-type="pmid">38802862</pub-id>
</mixed-citation>
</ref>
<ref id="B63">
<label>63</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Andrikopoulos</surname> <given-names>D</given-names></name>
<name><surname>Vassiliou</surname> <given-names>G</given-names></name>
<name><surname>Fatouros</surname> <given-names>P</given-names></name>
<name><surname>Tsirmpas</surname> <given-names>C</given-names></name>
<name><surname>Pehlivanidis</surname> <given-names>A</given-names></name>
<name><surname>Papageorgiou</surname> <given-names>C</given-names></name>
</person-group>. 
<article-title>Machine learning-enabled detection of attention-deficit/hyperactivity disorder with multimodal physiological data: a case-control study</article-title>. <source>BMC Psychiatry</source>. (<year>2024</year>) <volume>24</volume>:<fpage>547</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1186/s12888-024-05987-7</pub-id>, PMID: <pub-id pub-id-type="pmid">39103819</pub-id>
</mixed-citation>
</ref>
<ref id="B64">
<label>64</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Wang</surname> <given-names>X</given-names></name>
<name><surname>Xiao</surname> <given-names>H</given-names></name>
<name><surname>Zhao</surname> <given-names>Y</given-names></name>
<name><surname>Li</surname> <given-names>P</given-names></name>
<name><surname>Hu</surname> <given-names>X</given-names></name>
<name><surname>Qiu</surname> <given-names>X</given-names></name>
<etal/>
</person-group>. 
<article-title>Toward omni healthcare: wearable technologies for noncommunicable disease dynamic monitoring</article-title>. <source>Med Bull</source>. (<year>2025</year>) <volume>2</volume>:<page-range>5&#x2013;19</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1002/mdb2.70014</pub-id>
</mixed-citation>
</ref>
<ref id="B65">
<label>65</label>
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name><surname>Basic</surname> <given-names>J</given-names></name>
<name><surname>Uusimaa</surname> <given-names>J</given-names></name>
<name><surname>Salmi</surname> <given-names>J</given-names></name>
</person-group>. 
<article-title>Wearable motion sensors in the detection of ADHD: A critical review</article-title>. In: 
<person-group person-group-type="editor">
<name><surname>S&#xe4;rest&#xf6;niemi</surname> <given-names>M</given-names></name>
</person-group>, editors. <source>Digital health and wireless solutions. NCDHWS 2024. Communications in computer and information science</source>, <volume>2084</volume>. 
<publisher-name>Springer</publisher-name>, <publisher-loc>Cham</publisher-loc> (<year>2024</year>). doi:&#xa0;<pub-id pub-id-type="doi">10.1007/978-3-031-59091-7_12</pub-id>
</mixed-citation>
</ref>
<ref id="B66">
<label>66</label>
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name><surname>Willoughby</surname> <given-names>MT</given-names></name>
<name><surname>Pek</surname> <given-names>J</given-names></name>
<name><surname>Greenberg</surname> <given-names>MT</given-names></name><collab>Family Life Project Investigators</collab>
</person-group>. 
<article-title>Parent-reported attention deficit/hyperactivity symptomatology in preschool-aged children: Factor structure, developmental change, and early risk factors</article-title>. <source>J Abnorm Child Psychol</source>. (<year>2012</year>) <volume>40</volume>:<page-range>1301&#x2013;12</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s10802-012-9641-8</pub-id>, PMID: <pub-id pub-id-type="pmid">22581375</pub-id>
</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/232354">Luca Steardo Jr.</ext-link>, University Magna Graecia of Catanzaro, Italy</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1484832">Junbin Tian</ext-link>, Peking University Sixth Hospital, China</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1806828">Chao Li</ext-link>, Sichuan Academy of Medical Sciences and Sichuan Provincial People&#x2019;s Hospital, China</p></fn>
</fn-group>
<fn-group>
<fn fn-type="abbr" id="abbrev1">
<label>Abbreviations:</label>
<p>ADHD, Attention-Deficit/Hyperactivity Disorder; AI, Artificial Intelligence; APA, American Psychiatric Association; AUC, Area Under the (ROC) Curve; CI, Confidence Interval; CTRS-R:S, Conners Teacher Rating Scale&#x2013;Revised: Short Form; CPT, Continuous Performance Test; DSM-5, Diagnostic and Statistical Manual of Mental Disorders, 5th Edition; DSM-5-TR, Diagnostic and Statistical Manual of Mental Disorders, 5th Edition, Text Revision; ECI-4, Early Childhood Inventory-4; EEG, Electroencephalography; EPELI, Executive Performance in Everyday Living; F1-score, F1 Score; fps, frames per second; HER, High-Energy Rate; IMT, Infrared Motion Tracking; KNN, k-Nearest Neighbors; MRI/fMRI, (functional) Magnetic Resonance Imaging; NPV, Negative Predictive Value; OpenPose, Open-source pose estimation framework (library name); PFI, Permutation Feature Importance; POV, Point-of-View; ROC, Receiver Operating Characteristic; RF, Random Forest; SD, Standard Deviation; SPSS, Statistical Package for the Social Sciences (IBM SPSS Statistics); SVM, Support Vector Machine; Var, Variance; VR, Virtual Reality; ZCR, Zero-Crossing Rate; XTR, Extremely Randomized Trees (Extra Trees); ADA, AdaBoost.</p>
</fn>
</fn-group>
</back>
</article>