<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="review-article" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Rehabil. Sci.</journal-id>
<journal-title>Frontiers in Rehabilitation Sciences</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Rehabil. Sci.</abbrev-journal-title>
<issn pub-type="epub">2673-6861</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fresc.2023.1130847</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Rehabilitation Sciences</subject>
<subj-group>
<subject>Review</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Computer-assisted approaches for measuring, segmenting, and analyzing functional upper extremity movement: a narrative review of the current state, limitations, and future directions</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes"><name><surname>Jackson</surname><given-names>Kyle L.</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="cor1">&#x002A;</xref><uri xlink:href="https://loop.frontiersin.org/people/2142987/overview"/></contrib>
<contrib contrib-type="author"><name><surname>Duri&#x0107;</surname><given-names>Zoran</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref></contrib>
<contrib contrib-type="author"><name><surname>Engdahl</surname><given-names>Susannah M.</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/1679148/overview" /></contrib>
<contrib contrib-type="author"><name><surname>Santago</surname><given-names>Anthony C.</given-names><suffix>II</suffix></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/2246996/overview" /></contrib>
<contrib contrib-type="author"><name><surname>DeStefano</surname><given-names>Secili</given-names></name>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref></contrib>
<contrib contrib-type="author"><name><surname>Gerber</surname><given-names>Lynn H.</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff7"><sup>7</sup></xref>
<xref ref-type="aff" rid="aff8"><sup>8</sup></xref></contrib>
</contrib-group>
<aff id="aff1"><label><sup>1</sup></label><addr-line>Department of Computer Science</addr-line>, <institution>George Mason University</institution>, <addr-line>Fairfax, VA</addr-line>, <country>United States</country></aff>
<aff id="aff2"><label><sup>2</sup></label><institution>MITRE Corporation</institution>, <addr-line>McLean, VA</addr-line>, <country>United States</country></aff>
<aff id="aff3"><label><sup>3</sup></label><addr-line>Center for Adaptive Systems and Brain-Body Interactions</addr-line>, <institution>George Mason University</institution>, <addr-line>Fairfax, VA</addr-line>, <country>United States</country></aff>
<aff id="aff4"><label><sup>4</sup></label><addr-line>Department of Bioengineering</addr-line>, <institution>George Mason University</institution>, <addr-line>Fairfax, VA</addr-line>, <country>United States</country></aff>
<aff id="aff5"><label><sup>5</sup></label><institution>American Orthotic &#x0026; Prosthetic Association</institution>, <addr-line>Alexandria, VA</addr-line>, <country>United States</country></aff>
<aff id="aff6"><label><sup>6</sup></label><institution>Optimal Motion</institution>, <addr-line>Herndon, VA</addr-line>, <country>United States</country></aff>
<aff id="aff7"><label><sup>7</sup></label><addr-line>College of Public Health</addr-line>, <institution>George Mason University</institution>, <addr-line>Fairfax, VA</addr-line>, <country>United States</country></aff>
<aff id="aff8"><label><sup>8</sup></label><institution>Inova Health System</institution>, <addr-line>Falls Church, VA</addr-line>, <country>United States</country></aff>
<author-notes>
<fn fn-type="edited-by"><p><bold>Edited by:</bold> Anwar P. P. Abdul Majeed, Xi&#x2019;an Jiaotong-Liverpool University, China</p></fn>
<fn fn-type="edited-by"><p><bold>Reviewed by:</bold> Marco Ghislieri, Polytechnic University of Turin, Italy, Lynne Gauthier, University of Massachusetts Lowell, United States</p></fn>
<corresp id="cor1"><label>&#x002A;</label><bold>Correspondence:</bold> Kyle Jackson <email>kjacks21@gmu.edu</email></corresp>
<fn fn-type="other" id="fn001"><p><bold>Specialty Section:</bold> This article was submitted to Rehabilitation Engineering, a section of the journal Frontiers in Rehabilitation Sciences</p></fn>
</author-notes>
<pub-date pub-type="epub"><day>11</day><month>04</month><year>2023</year></pub-date>
<pub-date pub-type="collection"><year>2023</year></pub-date>
<volume>4</volume><elocation-id>1130847</elocation-id>
<history>
<date date-type="received"><day>06</day><month>01</month><year>2023</year></date>
<date date-type="accepted"><day>23</day><month>03</month><year>2023</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2023 Jackson, Duri&#x0107;, Engdahl, Santago, DeStefano and Gerber.</copyright-statement>
<copyright-year>2023</copyright-year><copyright-holder>Jackson, Duri&#x0107;, Engdahl, Santago, DeStefano and Gerber</copyright-holder><license license-type="open-access" xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract>
<p>The analysis of functional upper extremity (UE) movement kinematics has implications across domains such as rehabilitation and evaluating job-related skills. Using movement kinematics to quantify movement quality and skill is a promising area of research but is currently not being used widely due to issues associated with cost and the need for further methodological validation. Recent developments by computationally-oriented research communities have resulted in potentially useful methods for evaluating UE function that may make kinematic analyses easier to perform, generally more accessible, and provide more objective information about movement quality, the importance of which has been highlighted during the COVID-19 pandemic. This narrative review provides an interdisciplinary perspective on the current state of computer-assisted methods for analyzing UE kinematics with a specific focus on how to make kinematic analyses more accessible to domain experts. We find that a variety of methods exist to more easily measure and segment functional UE movement, with a subset of those methods being validated for specific applications. Future directions include developing more robust methods for measurement and segmentation, validating these methods in conjunction with proposed kinematic outcome measures, and studying how to integrate kinematic analyses into domain expert workflows in a way that improves outcomes.</p>
</abstract>
<kwd-group>
<kwd>upper extremity</kwd>
<kwd>functional movement</kwd>
<kwd>kinematic analysis</kwd>
<kwd>machine learning</kwd>
<kwd>computer vision</kwd>
<kwd>rehabilitation</kwd>
</kwd-group>
<contract-num rid="cn001">&#x00A0;</contract-num>
<contract-sponsor id="cn001">MITRE Corporation, McLean, Virginia. &#x00A9;2021 The MITRE Corporation. All rights reserved. Approved for public release. Distribution unlimited 22-00149-1. The funder was not involved in the study design, collection, analysis, interpretation of data, the writing of this article or the decision to submit it for publication</contract-sponsor>
<counts>
<fig-count count="3"/>
<table-count count="4"/><equation-count count="24"/><ref-count count="153"/><page-count count="0"/><word-count count="0"/></counts>
</article-meta>
</front>
<body>
<sec id="s1" sec-type="intro"><label>1.</label><title>Introduction</title>
<p>Functional upper extremity (UE) movements, where the UE is defined as including all regions distal from and including the shoulder (<xref ref-type="bibr" rid="B1">1</xref>), are used to purposely engage with one&#x2019;s environment (<xref ref-type="bibr" rid="B2">2</xref>) for needed or desired activities. The execution of these movements requires the coordination of multiple processes (<xref ref-type="bibr" rid="B3">3</xref>), where a disruption in one part of the chain may challenge an individual&#x2019;s ability to execute their desired task. Outcome measures derived from UE functional assessments (UEFAs) are used to support evidence-based research (e.g., meta-analyses), evaluate the impact of a disease or disability, and evaluate interventions (<xref ref-type="bibr" rid="B4">4</xref>).</p>
<p>The World Health Organization (WHO) International Classification of Functioning, Disability, and Health (ICF) is an internationally recognized framework for describing and measuring human health and disability (<xref ref-type="bibr" rid="B5">5</xref>). Using the WHO ICF, UEFAs can be classified as measuring body functions and structure (i.e., physiological function and anatomy), activity (i.e., execution of task or action by individual), or participation (i.e., involvement in a life situation), with overlap across categories being possible (<xref ref-type="bibr" rid="B4">4</xref>). This schema provides a standard nomenclature by which one selects outcome measures that can link various domains (e.g., impairment, function, societal integration) to better predict relationships needed to produce desired clinical outcomes. Additionally, the ICF provides a conceptual framework for assessing function likely to be valued by the individual with a specific diagnosis or impairment. Through its direct approach for evaluating both anatomically based outcomes and their utility to a person in their environment, one can systematically assess how an intervention impacts people.</p>
<p>Outcome measures can be further categorized as subjective or objective. The former consists of self-reports and the latter consists of data collected by instruments or a third party using &#x201C;&#x2026;&#x2009;validated equipment and standardized measurement protocols.&#x201D; (<xref ref-type="bibr" rid="B4">4</xref>). Both are essential for evaluating the effects of treatments. The ICF was motivated, among other factors, by the need to go beyond indicating whether a disease or disorder is present in an individual, which alone is a poor indicator of health planning and management requirements (<xref ref-type="bibr" rid="B6">6</xref>). In fact, the ICF was developed to augment patient evaluation and treatment from the perspective of health and not disease and disability. In other words, this approach permits a systematic documentation of an individual&#x2019;s deficits and abilities. The ICF promotes a view of health that hopefully will influence policy and practice that is additive to traditional mortality and morbidity outcome measures (<xref ref-type="bibr" rid="B6">6</xref>).</p>
<p>Numerous UEFAs have been validated to provide additional information besides the presence of a disease or disorder (<xref ref-type="bibr" rid="B4">4</xref>, <xref ref-type="bibr" rid="B7">7</xref>). However, currently validated UEFAs that measure an individual&#x2019;s ability to execute a task have limitations. Although both performance-based measures and self-reports are critical, subjective self-report measures can be biased (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B8">8</xref>). Furthermore, existing measures do not adequately measure movement quality (<xref ref-type="bibr" rid="B9">9</xref>), efficiency, or level of effort. These aspects of functional movement are important for a variety of applications, such as discerning between behavioral restitution and compensation during stroke rehabilitation (<xref ref-type="bibr" rid="B9">9</xref>), skilled job-related movements (<xref ref-type="bibr" rid="B10">10</xref>, <xref ref-type="bibr" rid="B11">11</xref>), and evaluating UE prostheses (<xref ref-type="bibr" rid="B12">12</xref>&#x2013;<xref ref-type="bibr" rid="B15">15</xref>).</p>
<p>UEFAs that use kinematics may provide more objective information of functional movement compared to existing validated clinical measures (<xref ref-type="bibr" rid="B8">8</xref>, <xref ref-type="bibr" rid="B9">9</xref>, <xref ref-type="bibr" rid="B16">16</xref>). The kinematics of human motion refer to the position displacement and its derivatives (e.g., velocity, acceleration, jerk) of the human body or manipulated objects. The analysis of kinematics includes the calculation of joint angles (<xref ref-type="bibr" rid="B17">17</xref>&#x2013;<xref ref-type="bibr" rid="B21">21</xref>) and measures of functional ability during goal-oriented tasks (<xref ref-type="bibr" rid="B8">8</xref>, <xref ref-type="bibr" rid="B9">9</xref>, <xref ref-type="bibr" rid="B16">16</xref>). Kinematics have traditionally been measured using specialized equipment, such as optical motion capture systems (<xref ref-type="bibr" rid="B20">20</xref>&#x2013;<xref ref-type="bibr" rid="B22">22</xref>), electrogoniometers (<xref ref-type="bibr" rid="B23">23</xref>), inertial measurement units (<xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B24">24</xref>, <xref ref-type="bibr" rid="B25">25</xref>), and hand-held devices (<xref ref-type="bibr" rid="B11">11</xref>, <xref ref-type="bibr" rid="B26">26</xref>). However, many of these systems can be prohibitively expensive to own and operate or are not easily portable, restricting wide-spread usage in relevant environments. Furthermore, post-processing these data (e.g., labelling occluded markers, movement segmentation) for analysis can be a manually-intensive and time-consuming process.</p>
<p>Advances in measurement sensors, computer vision, and machine learning have enabled the measurement and analysis of UE kinematics beyond the laboratory. Methods have been developed for estimating human pose without markers (<xref ref-type="bibr" rid="B27">27</xref>&#x2013;<xref ref-type="bibr" rid="B30">30</xref>) and automatically recognizing activities and actions (<xref ref-type="bibr" rid="B31">31</xref>&#x2013;<xref ref-type="bibr" rid="B37">37</xref>). Nonetheless, there is limited development and usage of computational tools for analyzing functional UE movement kinematics that meet the requirements of domain experts (e.g., biomechanists and clinicians). For example, in 2019 the Stroke Recovery and Rehabilitation Roundtable concluded that, &#x201C;&#x2026;&#x2009;only high-speed and high-resolution digital optoelectronic systems should be used to measure kinematics&#x2026;&#x201D;, specifically noting that wireless wearables (e.g., IMUs), Kinect, and other optical systems are currently inadequate for measuring movement quality (<xref ref-type="bibr" rid="B9">9</xref>). Furthermore, validating these computational tools for use in clinical and biomedical laboratories may require a level of rigor not typical of computational fields (e.g., correlating outputs from computational tools with health-related outcomes and evaluating quantities important to movement scientists) (<xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B39">39</xref>).</p>
<p>This paper investigates the following question: <italic>Given the need to inform clinical practice and job-related training with more objective data, what computer-assisted methods can reduce the burden associated with the kinematic analysis of UE movement</italic> (see <xref ref-type="fig" rid="F1">Figure&#x00A0;1</xref>)? Due to the expansiveness of the kinematic analysis workflow, our discussion is restricted to a few notable examples of computer-assisted approaches used in kinematic analyses as opposed to a systematic review. This paper represents an interdisciplinary perspective on the current state of computer-assisted methods as it relates to the process of conducting kinematic analyses of functional movement. Advancements needed for wider usage of kinematics for UEFAs discussed in this paper include:</p>
<fig id="F1" position="float"><label>Figure 1</label>
<caption><p>The process of analyzing functional human movement, modified from (<xref ref-type="bibr" rid="B23">23</xref>), which is the organizing framework for this review. Sections in this paper corresponding to the different components of the framework are indicated. The movement segmentation component has a dashed outline to indicate that it is not a necessary part of the kinematic analysis process, although it is frequently required. Definitions of components are provided in <xref ref-type="table" rid="T1">Table&#x00A0;1</xref>.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fresc-04-1130847-g001.tif"/>
</fig>
<list list-type="simple">
<list-item><label>1.</label><p>Developing measurement approaches, such as those based on markerless pose estimation, that meet accuracy requirements of domain experts, are easy to use, and measure relevant quantities (see Section <xref ref-type="sec" rid="s3">3</xref>).</p></list-item>
<list-item><label>2.</label><p>Computing useful measures from kinematic data often requires segmentation of movement into a standardized hierarchy, which is currently labor-intensive and not consistently defined (see Section <xref ref-type="sec" rid="s4">4</xref>).</p></list-item>
<list-item><label>3.</label><p>The need for validated kinematics-based outcome measures (see Section <xref ref-type="sec" rid="s5">5</xref>).</p></list-item>
<list-item><label>4.</label><p>Integrating kinematics analysis into domain expert workflows in a way that meaningfully improves domain-specific outcomes (see Section <xref ref-type="sec" rid="s6">6</xref>).</p></list-item>
</list>
<p>To our knowledge, a review has not been performed on computer-assisted methods for the entire UE functional movement kinematic analysis process (see <xref ref-type="fig" rid="F1">Figure&#x00A0;1</xref>). Previous reviews have comprehensively assessed kinematic measures that quantify UE performance during a variety of functional tasks (<xref ref-type="bibr" rid="B8">8</xref>, <xref ref-type="bibr" rid="B16">16</xref>, <xref ref-type="bibr" rid="B40">40</xref>), although these do not consider the end-to-end kinematic analysis workflow. Related reviews cover multiple components of the kinematic analysis workflow (<xref ref-type="bibr" rid="B33">33</xref>, <xref ref-type="bibr" rid="B40">40</xref>, <xref ref-type="bibr" rid="B41">41</xref>), but they either are focused on a specific application (e.g., handwriting (<xref ref-type="bibr" rid="B42">42</xref>)) or omit important components of the workflow (e.g., functional primitive segmentation (<xref ref-type="bibr" rid="B40">40</xref>) and kinematic measurement (<xref ref-type="bibr" rid="B41">41</xref>)). There have also been reviews of computer-assisted methods to support rehabilitative training using serious games (<xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B44">44</xref>), which is related to our review but is not the focus.</p>
</sec>
<sec id="s2"><label>2.</label><title>Review organization</title>
<sec id="s2a"><label>2.1.</label><title>Exclusion and inclusion criteria</title>
<p>Excluded are applications in sports (<xref ref-type="bibr" rid="B45">45</xref>) and hand gesture recognition (<xref ref-type="bibr" rid="B46">46</xref>, <xref ref-type="bibr" rid="B47">47</xref>). Hand gesture recognition is excluded because it is a form of non-verbal communication, as opposed to being used for assessing functional UE movement.</p>
<p>Job-related assessments of skillful UE functional motion using kinematics are included. These assessments are similarly motivated by the need for more objective measures of performance (<xref ref-type="bibr" rid="B10">10</xref>, <xref ref-type="bibr" rid="B11">11</xref>) and follow closely the health-oriented kinematic analysis workflow. The methods developed for job-related assessment applications can also be applied to health applications involving the UE.</p>
</sec>
<sec id="s2b"><label>2.2.</label><title>Organizational overview</title>
<p>Winter (<xref ref-type="bibr" rid="B23">23</xref>) describes the scientific approach to biomechanics, which this paper uses to represent the kinematic analysis workflow associated with UEFAs (<xref ref-type="bibr" rid="B23">23</xref>). We make an addition to the kinematic analysis process to include movement segmentation, which has previously been identified as necessary for a variety of kinematic analyses (<xref ref-type="bibr" rid="B40">40</xref>, <xref ref-type="bibr" rid="B48">48</xref>). The resulting process (see <xref ref-type="fig" rid="F1">Figure&#x00A0;1</xref>) consists of movement measurement (Section <xref ref-type="sec" rid="s3">3</xref>), segmentation (Section <xref ref-type="sec" rid="s4">4</xref>), description and analysis (Section <xref ref-type="sec" rid="s5">5</xref>), and assessment and interpretation (Section <xref ref-type="sec" rid="s6">6</xref>). Definitions for each component are in <xref ref-type="table" rid="T1">Table&#x00A0;1</xref>.</p>
<table-wrap id="T1" position="float"><label>Table 1</label>
<caption><p>Computer-assisted functional upper extremity assessment process modified from (<xref ref-type="bibr" rid="B23">23</xref>).</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Process phase</th>
<th valign="top" align="left">Definition</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Measurement</td>
<td valign="top" align="left">The capture of UE motion kinematics, which results in data used for analysis. Often done with optical motion capture systems, wearable inertial measurement units, commodity cameras, or taken directly from the object being manipulated (e.g., a tablet stylus pen).</td>
</tr>
<tr>
<td valign="top" align="left">Movement Segmentation</td>
<td valign="top" align="left">The process of segmenting movements into distinct movement phases, such as functional movements and primitives (see <xref ref-type="table" rid="T3">Table&#x00A0;3</xref>).</td>
</tr>
<tr>
<td valign="top" align="left">Description</td>
<td valign="top" align="left">Can be of many forms, but typically defined as visualizations of the data (e.g., velocity magnitude time series of wrist marker) or simple outcome measures.</td>
</tr>
<tr>
<td valign="top" align="left">Analysis</td>
<td valign="top" align="left">Defined as a mathematical operation performed on the data to present them in a different form or to combine several sources of data to produce a variable that is not directly measurable (e.g., inverse kinematic solution).</td>
</tr>
<tr>
<td valign="top" align="left">Assessment and Interpretation</td>
<td valign="top" align="left">The assessment of descriptions and analyses, which informs decisions about interventions.</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s3"><label>3.</label><title>Measurement</title>
<sec id="s3a"><label>3.1.</label><title>Background</title>
<p>Kinematics is concerned with quantifying the details of movement itself (e.g., position displacement, velocity, acceleration, and jerk) and not the forces that cause the movement, where the goal is to use kinematics to provide actionable information for the domain expert. Kinematic data are collected by either direct measurement or optical systems (<xref ref-type="bibr" rid="B23">23</xref>).</p>
<sec id="s3a1"><label>3.1.1.</label><title>Direct measurement systems</title>
<p>Direct measurement systems involve placing equipment on the individual being evaluated, which includes using electrogoniometers and special gloves for hands outfitted with transducers for measuring joint angles, and inertial sensors (<xref ref-type="bibr" rid="B23">23</xref>). These direct joint angle measurements (<xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B18">18</xref>) can be used in a variety of ways (e.g., visualized or measures computed from joint angle time series) to evaluate functional movement (<xref ref-type="bibr" rid="B19">19</xref>). Electrogoniometers can be relatively inexpensive and provide kinematic data immediately. However, it can be challenging to properly place the goniometer on an individual and wearing the device can influence their natural movement due to encumbrance. Additionally, more complex goniometers may be required for joints that do not move as hinge joints (e.g., wrist and shoulder). Inertial sensors are worn on the body, where inertial data from the sensors individually (e.g., motion of the wrist only) (<xref ref-type="bibr" rid="B49">49</xref>) or from multiple sensors (e.g., fused together to provide human pose estimates and joint angles) can be used for kinematic analyses (<xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B50">50</xref>). There are also systems that measure the movement of a device being operated by the individual, such as end effectors (e.g., tablet pens (<xref ref-type="bibr" rid="B42">42</xref>), haptic devices (<xref ref-type="bibr" rid="B26">26</xref>), ultrasound probes (<xref ref-type="bibr" rid="B11">11</xref>), laparoscopic manipulators (<xref ref-type="bibr" rid="B51">51</xref>, <xref ref-type="bibr" rid="B52">52</xref>)) and exoskeletons (<xref ref-type="bibr" rid="B53">53</xref>). The equipment cost and ease of use varies greatly across these systems, but generally they provide high sampling rates and accurate kinematics.</p>
</sec>
<sec id="s3a2"><label>3.1.2.</label><title>Optical systems</title>
<p>Optical systems can be categorized as being markerless video capture, marker-based capture with passive reflective markers, or optoelectric systems with markers that actively emit light (<xref ref-type="bibr" rid="B23">23</xref>). Optical systems are used to provide motion of individual landmarks (e.g., on the wrist) or to model human pose, where the latter can be used to measure joint angles (<xref ref-type="bibr" rid="B19">19</xref>). Markerless capture cameras, which include 2D RGB and 3D RGB-D cameras, are relatively inexpensive, but have traditionally required anatomical landmarks to be manually identified by a human operator, an approach that makes this process infeasible for large studies or widespread usage. However, markerless and passive marker systems either do not or minimally encumber the individual being evaluated, whereas active markers can be encumbering due to the wiring between the markers (see <xref ref-type="fig" rid="F2">Figure&#x00A0;2</xref>). Multi-camera systems for passive and active markers can also be prohibitively expensive to own and operate, although these systems are highly accurate and are considered to be the &#x201C;gold standard&#x201D; in movement science (<xref ref-type="bibr" rid="B9">9</xref>, <xref ref-type="bibr" rid="B55">55</xref>). Although not an optical system, some systems use active markers that emit sound or radio signals, which are picked up by receivers used to locate the active marker (<xref ref-type="bibr" rid="B13">13</xref>).</p>
<fig id="F2" position="float"><label>Figure 2</label>
<caption><p>(left) Individual outfitted with active markers for an optoelectronic motion capture system (NDI Optotrak&#x00AE;) while completing the Targeted Box and Blocks Test (<xref ref-type="bibr" rid="B54">54</xref>). (right) Individual moving objects over the middle partition while being tracked with the markerless pose estimation tool OpenPose (<xref ref-type="bibr" rid="B28">28</xref>). In the left image, multiple markers are placed on the right arm to reduce tracking fragmentation due to occlusions. These markers, cables, and associated outfit requirements could encumber or impact the individual&#x2019;s normal movement, motivating the use of a markerless motion capture system. Participant consent was given for photo usage.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fresc-04-1130847-g002.tif"/>
</fig>
</sec>
</sec>
<sec id="s3b"><label>3.2.</label><title>Human pose estimation</title>
<p>Measurement tools are needed that minimize the impact of encumbrance on natural movement, provide near real-time kinematic data with minimal noise and inaccuracies, and are relatively inexpensive to own and operate. A 2019 systematic review of low-cost optical motion capture for clinical rehabilitation indicated the need for better measurement methods and validation studies, although most papers reviewed were not specific to UE functional motion (<xref ref-type="bibr" rid="B56">56</xref>).</p>
<p>There has recently been substantial progress on 2D and 3D human pose estimation using low-cost sensors, where the goal is to infer a representation of the body from images, video, or inertial sensor data. Kinematics can then be derived from the output representation. <xref ref-type="table" rid="T2">Table&#x00A0;2</xref> represents a taxonomy of these methods, inspired by previous taxonomies (<xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B57">57</xref>). Included in <xref ref-type="table" rid="T2">Table&#x00A0;2</xref> are recent reviews and research papers for each methodological approach, along with a non-exhaustive list of works evaluating the utility of these methods for measuring UE kinematics. A comprehensive review of human pose estimation algorithms is beyond the scope of this paper, where instead we include a brief description of recent measurement approaches categorized by one of three input data types&#x2014;RGB, RGB-D, and inertial data&#x2014;and synthesize recent results from studies evaluating their utility for use cases involving UE functional motion. Radio frequency devices (e.g., WiFi) that do not require transmitters placed on the body have also been used for pose estimation (<xref ref-type="bibr" rid="B30">30</xref>). However, these methods currently have low spatial resolution, and we are not aware of their usage for UEFAs.</p>
<table-wrap id="T2" position="float"><label>Table 2</label>
<caption><p>Taxonomy of human pose estimation approaches inspired by (<xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B57">57</xref>).</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Output dimension</th>
<th valign="top" align="left">Output representation</th>
<th valign="top" align="left">Measurement device</th>
<th valign="top" align="left">Input data</th>
<th valign="top" align="left">Methodological references</th>
<th valign="top" align="left">UE application</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" rowspan="2">2D HPE</td>
<td valign="top" align="left">Planar</td>
<td valign="top" align="left">Monocular camera</td>
<td valign="top" align="left">2D RGB</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B57">57</xref>)</td>
<td valign="top" align="left">Not aware of usage</td>
</tr>
<tr>
<td valign="top" align="left">Keypoint</td>
<td valign="top" align="left">Monocular camera</td>
<td valign="top" align="left">2D RGB</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B28">28</xref>, <xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B57">57</xref>)</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B58">58</xref>, <xref ref-type="bibr" rid="B59">59</xref>)</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="7">3D HPE</td>
<td valign="top" align="left" rowspan="4">Kinematic</td>
<td valign="top" align="left">Monocular camera</td>
<td valign="top" align="left">2D RGB</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B30">30</xref>); (<xref ref-type="bibr" rid="B60">60</xref>) for hand pose</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B59">59</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Multi-view cameras</td>
<td valign="top" align="left">2D RGB</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B61">61</xref>, <xref ref-type="bibr" rid="B62">62</xref>)</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B63">63</xref>, <xref ref-type="bibr" rid="B64">64</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Depth camera (e.g., Kinect)</td>
<td valign="top" align="left">3D RGB-D</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B30">30</xref>); (<xref ref-type="bibr" rid="B60">60</xref>) for hand pose</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B59">59</xref>, <xref ref-type="bibr" rid="B65">65</xref>&#x2013;<xref ref-type="bibr" rid="B71">71</xref>, <xref ref-type="bibr" rid="B83">83</xref>, <xref ref-type="bibr" rid="B84">84</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Inertial sensors</td>
<td valign="top" align="left">Inertial data</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B50">50</xref>, <xref ref-type="bibr" rid="B72">72</xref>)</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B24">24</xref>, <xref ref-type="bibr" rid="B73">73</xref>, <xref ref-type="bibr" rid="B74">74</xref>)</td>
</tr>
<tr>
<td valign="top" align="left" rowspan="3">Volumetric</td>
<td valign="top" align="left">Monocular camera</td>
<td valign="top" align="left">2D RGB</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B29">29</xref>, <xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B75">75</xref>, <xref ref-type="bibr" rid="B76">76</xref>); (<xref ref-type="bibr" rid="B60">60</xref>) for hand pose</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B17">17</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">Multi-view cameras</td>
<td valign="top" align="left">2D RGB</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B30">30</xref>)</td>
<td valign="top" align="left">Not aware of usage</td>
</tr>
<tr>
<td valign="top" align="left">Depth cameras</td>
<td valign="top" align="left">3D RGB-D</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B77">77</xref>)</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B78">78</xref>)</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn id="table-fn1"><p>HPE, human pose estimation; RGB, red-green-blue; RGB-D, red-green-blue-depth; 2D, two-dimensional; 3D, three-dimensional.</p></fn>
</table-wrap-foot>
</table-wrap>
<p>Additionally, we consider four representations used in human pose estimation&#x2014;planar, kinematic, keypoint, and volumetric&#x2014;along with their respective input data types (<xref ref-type="bibr" rid="B30">30</xref>) (see <xref ref-type="table" rid="T2">Table&#x00A0;2</xref> for associations between representations and input data):
<list list-type="simple">
<list-item><label><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM1"><mml:mo>&#x2219;</mml:mo></mml:math></inline-formula></label><p><bold>Planar</bold>: This representation models the shape and appearance of the human body, which is usually represented as rectangles approximating the contours of the body.</p></list-item>
<list-item><label><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM2"><mml:mo>&#x2219;</mml:mo></mml:math></inline-formula></label><p><bold>Kinematic</bold>: Models the joint positions and limb orientations of the human body in a 3D graph representation.</p></list-item>
<list-item><label><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM3"><mml:mo>&#x2219;</mml:mo></mml:math></inline-formula></label><p><bold>Keypoint</bold>: Similar to the 3D kinematic representation, except that it is a 2D projection of the 3D body (see <xref ref-type="fig" rid="F2">Figure&#x00A0;2</xref>), i.e., the inferred representation is only in 2D. Note that some works in the computer vision literature (<xref ref-type="bibr" rid="B30">30</xref>) conflate the 3D kinematic representation with the 2D keypoint representation, which can be confusing.</p></list-item>
<list-item><label><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM4"><mml:mo>&#x2219;</mml:mo></mml:math></inline-formula></label><p><bold>Volumetric</bold>: A 3D mesh representation.</p></list-item>
</list></p>
<sec id="s3b1"><label>3.2.1.</label><title>2D RGB input</title>
<p>Human pose estimation algorithms can take as input two-dimensional (2D; x and y) red-green-blue (RGB) images, which is what most consumer cameras capture, and output either a 2D or 3D representation of the body (<xref ref-type="bibr" rid="B30">30</xref>). Per <xref ref-type="table" rid="T2">Table&#x00A0;2</xref>, the output 2D representations are either planar or keypoints, and output 3D representations are kinematic or volumetric. Large data sets consisting of labeled anatomical landmarks or human pose are used to train machine learning models that infer anatomical landmarks in new, unseen images.</p>
<sec id="s3b1a"><label>3.2.1.1.</label><title>2D keypoint representation</title>
<p>The output 2D keypoint representation has had considerable research interest recently, which is partially motivated by the ubiquity of RGB cameras (<xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B57">57</xref>). Although there are many algorithms, one notable 2D human pose estimation algorithm is OpenPose (<xref ref-type="bibr" rid="B28">28</xref>), which has been evaluated for utility in measuring UE kinematics (<xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B58">58</xref>, <xref ref-type="bibr" rid="B59">59</xref>), among other approaches. These applications involved evaluating the 2D errors of the pose predictions for reaching movements in infants (<xref ref-type="bibr" rid="B58">58</xref>) or extracting depth values from a red-green-blue-depth (RGB-D) image using the 2D predictions to create 3D landmarks of UE movements (<xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B59">59</xref>). Using 2D keypoint predictions followed by converting to 3D coordinates using depth from an RGB-D camera appears to be the most common use of 2D pose estimation by movement scientists because human functional motion is often tri-planar, except for assessments where uni-planar movement is specifically of interest (e.g., shoulder abduction in frontal plane (<xref ref-type="bibr" rid="B59">59</xref>)).</p>
<p>The best-performing 2D pose estimation algorithms have been demonstrated to be useful for a variety of training and rehabilitation applications (<xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B58">58</xref>, <xref ref-type="bibr" rid="B59">59</xref>, <xref ref-type="bibr" rid="B79">79</xref>, <xref ref-type="bibr" rid="B80">80</xref>) involving gross movements. However, improvements are still needed to make 2D pose estimation comparable to gold standard motion capture systems, such as incorporating physiological constraints (<xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B80">80</xref>) and temporal smoothing (<xref ref-type="bibr" rid="B17">17</xref>). Additionally, many of the pre-trained 2D pose estimation methods rely on training data sets consisting primarily of able-bodied individuals (<xref ref-type="bibr" rid="B17">17</xref>) with crowd-sourced, hand-labeled keypoints that potentially contain errors (<xref ref-type="bibr" rid="B38">38</xref>). Perhaps the greatest limitation of 2D pose estimation for analyzing human kinematics is that it is not 3D, which makes measurement of complex 3D motions (i.e., tri-planar), textures, and shapes infeasible, particularly when using a single RGB camera.</p>
</sec>
<sec id="s3b1b"><label>3.2.1.2.</label><title>3D kinematic representation</title>
<p>An output 3D kinematic representation can be inferred from 2D RGB images either directly or as a follow-on step to an intermediary 2D pose estimation output (i.e., &#x201C;lifting&#x201D; from 2D-to-3D), where 2D-to-3D lifting approaches typically outperform direct estimation methods given the current state-of-the-art 2D pose estimation methods (<xref ref-type="bibr" rid="B30">30</xref>). Alternatively, 2D pose estimation using RGB from multiple camera views of the individual can provide an estimate of a 3D kinematic representation (<xref ref-type="bibr" rid="B61">61</xref>&#x2013;<xref ref-type="bibr" rid="B64">64</xref>), where a multi-camera setup requires synchronizing the recordings and computing 3D keypoints from the triangulation of the synchronized 2D keypoints (<xref ref-type="bibr" rid="B62">62</xref>). Additionally, multi-camera setups minimize the possibility of body parts being occluded during more complex motions, where occlusions can cause instability in pose estimation performance (<xref ref-type="bibr" rid="B62">62</xref>, <xref ref-type="bibr" rid="B78">78</xref>). These multi-camera methods have been evaluated against marker-based optical motion capture for the UE (<xref ref-type="bibr" rid="B61">61</xref>&#x2013;<xref ref-type="bibr" rid="B64">64</xref>). Although they were not focused solely on UE movement, the assessment methodologies and results are relevant.</p>
<p>OpenPose (<xref ref-type="bibr" rid="B28">28</xref>), a popular 2D pose estimation method from RGB, was evaluated with a multi-camera setup during walking, jumping, and throwing a ball, where the tracking results were compared to a marker-based optical motion capture system (<xref ref-type="bibr" rid="B63">63</xref>). For the shoulder, elbow, and wrist joints tracked, (<xref ref-type="bibr" rid="B63">63</xref>) found the respective mean absolute error (MAE)&#x2014;where we calculated the mean and standard deviation of the reported MAE values across activities and axes from Table 1 of (<xref ref-type="bibr" rid="B63">63</xref>)&#x2014;means to be 23.2, 28.9, and 24&#x2009;mm, and standard deviations to be 9.29, 16.2, and 13.5&#x2009;mm. In a separate study, three 2D pose estimation algorithms in a multi-camera setup outputting 3D pose estimates during walking, running, and jumping were compared to marker-based optical motion capture (<xref ref-type="bibr" rid="B64">64</xref>). The minimum and maximum of the 95&#x0025; limit of agreement values reported for the shoulder joint center during walking were 14 and 43&#x2009;mm, respectively, with generally higher errors for running and jumping. Ivorra et al. (<xref ref-type="bibr" rid="B59">59</xref>) evaluated the application of multiple pose estimation approaches to tracking UE exercises with a single camera view and found the method that used only 2D RGB data (referred to as RGB-3DHP)&#x2014;percent difference averaged across tasks of 18.2&#x0025; compared to marker-based capture&#x2014;to be less accurate than the other methods&#x2014;10.7&#x0025; and 7.6&#x0025;&#x2014;that used RGB-D data as input. Therefore, a 3D kinematic representation output from 2D images may be currently restricted to measuring gross UE motions for applications where high accuracy is not required, such as rehabilitation games, as recommended by (<xref ref-type="bibr" rid="B59">59</xref>).</p>
<p>Regarding the value of estimating 3D pose from multiple RGB cameras, (<xref ref-type="bibr" rid="B62">62</xref>) found that for a 2D pose estimation method called HRNet, the average marker error across all markers&#x2014;where markers were for the whole body&#x2014;and activities was 32&#x2009;mm with the two-camera setup, and improved to 29&#x2009;mm with a five-camera setup. However, accuracy was consistent across the varied pose detectors and number of cameras when using OpenCap (proposed by (<xref ref-type="bibr" rid="B62">62</xref>)), which makes some modifications to the pose estimation process. These results suggest that while multiple cameras will help resolve issues with occlusion, exactly how many cameras are needed will depend on the pose estimation method being used and the types of motions being measured.</p>
</sec>
<sec id="s3b1c"><label>3.2.1.3.</label><title>3D volumetric representation</title>
<p>Inferred 3D volumetric representations (<xref ref-type="bibr" rid="B29">29</xref>, <xref ref-type="bibr" rid="B75">75</xref>, <xref ref-type="bibr" rid="B76">76</xref>) from 2D RGB input appear to be not as thoroughly studied for measuring UE kinematics compared to the 2D keypoint and 3D kinematic representations, although these methods appear to capture details of hands relatively well. One UE application example is UE kinematics being measured with wearable IMUs using an inferred 3D mesh representation and 2D keypoint representation&#x2014;stereo was used to get the depth values for the 2D representation&#x2014;for IMU calibration (<xref ref-type="bibr" rid="B17">17</xref>). After calibration, the IMUs could be used to track joint trajectories alone or optionally with the 3D pose estimates from video.</p>
</sec>
</sec>
<sec id="s3b2"><label>3.2.2.</label><title>3D RGB-D input</title>
<p>Pose estimation methods that take as input 3D red-green-blue-depth (RGB-D; <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM5"><mml:mi>x</mml:mi></mml:math></inline-formula>, <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM6"><mml:mi>y</mml:mi></mml:math></inline-formula>, and <inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM7"><mml:mi>z</mml:mi></mml:math></inline-formula>) data will output either 3D kinematic or volumetric representations of the body (see <xref ref-type="table" rid="T2">Table&#x00A0;2</xref>). Microsoft Kinect&#x2014;versions include V1, V2, and Azure&#x2014;is an RGB-D camera commonly used in studies evaluating pose estimation for kinematic measurements because the cameras are relatively cheap, portable, easy to use, and have a built-in pose estimation capability that returns inferred joint positions using depth data. Details about the Kinect V2 pose estimation algorithm are published (<xref ref-type="bibr" rid="B81">81</xref>), whereas the details for the Azure Kinect are not disclosed. Different versions of the Kinect are used in all reviewed papers using RGB-D to measure UE kinematics (<xref ref-type="bibr" rid="B59">59</xref>, <xref ref-type="bibr" rid="B65">65</xref>, <xref ref-type="bibr" rid="B67">67</xref>, <xref ref-type="bibr" rid="B69">69</xref>, <xref ref-type="bibr" rid="B70">70</xref>, <xref ref-type="bibr" rid="B78">78</xref>) except for (<xref ref-type="bibr" rid="B17">17</xref>), which computes the depth map from calibrated stereo cameras. Compared to pose estimation methods that use 2D RGB images only, methods that use RGB-D images appear to provide more accurate estimates (<xref ref-type="bibr" rid="B59">59</xref>) when only a single camera is used, and such accurate estimates are necessary for measuring fine movements. However, this may not be the case in multi-camera setups and is a subject for further study.</p>
<sec id="s3b2a"><label>3.2.2.1.</label><title>3D kinematic representation</title>
<p>For the output 3D kinematic representation, none of the UE application studies we review in this category (<xref ref-type="bibr" rid="B59">59</xref>, <xref ref-type="bibr" rid="B65">65</xref>&#x2013;<xref ref-type="bibr" rid="B71">71</xref>, <xref ref-type="bibr" rid="B82">82</xref>&#x2013;<xref ref-type="bibr" rid="B84">84</xref>) involved pose estimation algorithms other than the Kinect&#x2019;s built-in method, although alternative depth-based algorithms exist (<xref ref-type="bibr" rid="B30">30</xref>). The consensus from these studies, which involved a variety of UE movements, is that the Kinect&#x2019;s pose estimation method is suitable for measuring gross movements but is not suitable for fine movements. For example, the Kinect failed to adequately track shoulder movement (<xref ref-type="bibr" rid="B69">69</xref>), which is an important compensatory movement to measure in clinical settings (e.g., during stroke rehabilitation (<xref ref-type="bibr" rid="B78">78</xref>) and UE prosthesis use (<xref ref-type="bibr" rid="B12">12</xref>&#x2013;<xref ref-type="bibr" rid="B14">14</xref>)). Better methods could be used if real-time processing is not a requirement (<xref ref-type="bibr" rid="B59">59</xref>), whereas the Kinect was developed specifically for gaming and therefore requires real-time pose estimates.</p>
</sec>
<sec id="s3b2b"><label>3.2.2.2.</label><title>3D volumetric representation</title>
<p>3D volumetric representations can also be inferred or fitted from RGB-D images (<xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B77">77</xref>, <xref ref-type="bibr" rid="B78">78</xref>). Jatesiktat et al. (<xref ref-type="bibr" rid="B78">78</xref>) proposed improving the Kinect V2 3D kinematic pose estimates for the upper body by fitting a human mesh representation (<xref ref-type="bibr" rid="B85">85</xref>) to the depth image, along with using two wrist-worn IMUs to mitigate issues with forearm occlusion. This approach allowed for better tracking of the shoulder, wrist, and elbow compared to using the Kinect pose tracker alone by 25.9&#x0025; across all the evaluation data and 43.7&#x0025; across the cases with occlusion. While (<xref ref-type="bibr" rid="B69">69</xref>) indicated the Kinect pose tracker alone could not provide measurements of fine shoulder movement, the results from (<xref ref-type="bibr" rid="B78">78</xref>) suggest that a 3D volumetric representation can be used to improve 3D kinematic representations. According to Figure 4 in (<xref ref-type="bibr" rid="B78">78</xref>), the proposed method with the IMU improved the average error from approximately 45 to 33&#x2009;mm, although whether this is sufficient for fine shoulder motion is an open question.</p>
</sec>
</sec>
<sec id="s3b3"><label>3.2.3.</label><title>Inertial data input</title>
<p>Wearable IMUs have been studied extensively by the movement science community (<xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B24">24</xref>, <xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B49">49</xref>, <xref ref-type="bibr" rid="B50">50</xref>, <xref ref-type="bibr" rid="B72">72</xref>&#x2013;<xref ref-type="bibr" rid="B74">74</xref>, <xref ref-type="bibr" rid="B78">78</xref>, <xref ref-type="bibr" rid="B86">86</xref>). Although kinematics from the IMUs in isolation can be used (e.g., motion of the wrist-worn IMU), multiple IMUs attached to the body are used for pose estimation due to sensors being low cost and not suffering from issues associated with occlusion. IMUs have been assessed to be suitable for estimating UE kinematics in the laboratory and clinical settings (<xref ref-type="bibr" rid="B24">24</xref>, <xref ref-type="bibr" rid="B50">50</xref>, <xref ref-type="bibr" rid="B74">74</xref>), but there are challenges associated with widespread usage outside of these controlled settings.</p>
<p>These challenges include sensor calibration, drift over time associated with gyroscopes, and magnetometers being sensitive to certain metals in the environment (<xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B50">50</xref>, <xref ref-type="bibr" rid="B72">72</xref>). However, a variety of methods exist for calibration, reducing drift, and handling magnetic disturbances (<xref ref-type="bibr" rid="B25">25</xref>), where the extent of these issues (e.g., magnitude of the drift) will depend on what methods are used. For example, (<xref ref-type="bibr" rid="B73">73</xref>) excluded magnetometers from their proposed upper body pose estimation method using IMUs, avoiding magnetometer disturbance concerns, although a comparison of the magnetometer-free method with methods using magnetometers while attempting to minimize magnetic disturbances (<xref ref-type="bibr" rid="B25">25</xref>) was not performed. Newer methods that fuse optical motion capture with IMUs for calibration could make it easier to get relevant kinematic measurements of UE movement (<xref ref-type="bibr" rid="B17">17</xref>). Inertial data, potentially along with other data types that could come from wearables (e.g., electromyography), can also be fused with optical pose estimation approaches to provide potentially better kinematic measurements (<xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B30">30</xref>).</p>
</sec>
</sec>
<sec id="s3c"><label>3.3.</label><title>Measurements of manipulated systems</title>
<p>While using optical motion capture and IMUs to measure complex functional UE movement kinematics is popular, there are other ways to measure functional UE movement that tend to be application-specific. For example, haptic virtual environments record precise kinematic information via encoders (<xref ref-type="bibr" rid="B87">87</xref>) and provide a customizable workspace to assess functional UE movement, which has uses in rehabilitation (<xref ref-type="bibr" rid="B26">26</xref>) and surgical skill assessment (<xref ref-type="bibr" rid="B88">88</xref>). Kinematic measurements have also been recorded from real laparoscopic box trainers, which has been used to evaluate surgical skill (<xref ref-type="bibr" rid="B51">51</xref>, <xref ref-type="bibr" rid="B52">52</xref>, <xref ref-type="bibr" rid="B89">89</xref>&#x2013;<xref ref-type="bibr" rid="B93">93</xref>). UE kinematic measurements have been recorded by the objects people manipulate, as is the case with ultrasound probes that have been used to assess the skill of obstetric sonographers (<xref ref-type="bibr" rid="B11">11</xref>). Handwriting on digitizing tablets is used for assessing neurodegenerative diseases (<xref ref-type="bibr" rid="B42">42</xref>, <xref ref-type="bibr" rid="B94">94</xref>) and dysgraphia (<xref ref-type="bibr" rid="B95">95</xref>) using kinematic information of the pen tip and pen pressure on the writing surface.</p>
</sec>
<sec id="s3d"><label>3.4.</label><title>Evaluating measurement methods</title>
<p>Domain experts need to understand how well measurement systems work and whether they can be adopted for their applications. The computationally-oriented literature tends to focus on evaluating the accuracy and run-time of new measurement methods, such as assessing keypoint localization error for 2D pose estimation using the keypoint representation (<xref ref-type="bibr" rid="B30">30</xref>). For adoption in healthcare applications involving UEFAs, test-retest reliability (<xref ref-type="bibr" rid="B50">50</xref>, <xref ref-type="bibr" rid="B68">68</xref>, <xref ref-type="bibr" rid="B70">70</xref>, <xref ref-type="bibr" rid="B96">96</xref>) and validity (<xref ref-type="bibr" rid="B24">24</xref>, <xref ref-type="bibr" rid="B50">50</xref>, <xref ref-type="bibr" rid="B59">59</xref>, <xref ref-type="bibr" rid="B66">66</xref>, <xref ref-type="bibr" rid="B68">68</xref>) need to be assessed. Furthermore, accuracy assessments (<xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B63">63</xref>, <xref ref-type="bibr" rid="B64">64</xref>, <xref ref-type="bibr" rid="B67">67</xref>, <xref ref-type="bibr" rid="B73">73</xref>, <xref ref-type="bibr" rid="B74">74</xref>, <xref ref-type="bibr" rid="B78">78</xref>) that do not rely solely on healthy participants (<xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B38">38</xref>) are needed. Although the reviewed pose estimation methods are finding utility in health applications related to measuring UE movement, more widespread adoption of these tools requires further assessments of measurement accuracy, validity, and reliability (<xref ref-type="bibr" rid="B9">9</xref>) on quantities that are important to movement scientists (<xref ref-type="bibr" rid="B38">38</xref>), e.g., joint angle (<xref ref-type="bibr" rid="B17">17</xref>&#x2013;<xref ref-type="bibr" rid="B21">21</xref>, <xref ref-type="bibr" rid="B50">50</xref>, <xref ref-type="bibr" rid="B62">62</xref>).</p>
</sec>
</sec>
<sec id="s4"><label>4.</label><title>Movement segmentation</title>
<sec id="s4a"><label>4.1.</label><title>Background and motivation</title>
<sec id="s4a1"><label>4.1.1.</label><title>Background</title>
<p>Useful kinematic descriptions and analyses require comparing the same types of functional motions across individuals, such as the reaching portion of a trajectory an individual follows to grasp an item (<xref ref-type="bibr" rid="B97">97</xref>). However, movement segmentation is challenging because the UE is complex (e.g., the UE has seven degrees of freedom and can be moved with the torso) and people move differently, even on the same task (<xref ref-type="bibr" rid="B48">48</xref>). Due to the variability in UE movement on a given functional task, segmenting the movement into meaningful parts for analysis is a manually intensive process and can be the most time-consuming part of the kinematic analysis process. Therefore, targeting research efforts to alleviate the burden of segmenting kinematic data would have considerable impact on the kinematic analysis process across many applications.</p>
<p>A segmentation procedure has two outputs: (1) the start and stop timestamps of the motion sequence and (2) what type of motion the sequence is (<xref ref-type="bibr" rid="B98">98</xref>&#x2013;<xref ref-type="bibr" rid="B100">100</xref>). This requirement means that a sequence can potentially have multiple classes of motions, which is considered a more challenging problem than predicting the motion class of an already trimmed segment consisting of only one class. The simplest and most time-consuming way to do this is to manually segment the data based on descriptions of the movement (see &#x201C;Describe&#x201D; in <xref ref-type="fig" rid="F1">Figure&#x00A0;1</xref>) and video recordings (<xref ref-type="bibr" rid="B48">48</xref>). The development and usage of automated movement segmentation algorithms will help reduce the cost and burden associated with conducting kinematic assessments, especially as kinematic assessments become more prevalent and the amount of data needing processing exceeds the capacity of current workflows used in the research setting.</p>
</sec>
<sec id="s4a2"><label>4.1.2.</label><title>Organizing hierarchy</title>
<p>The computational literature inconsistently labels levels of functional motion, which can be confusing when trying to identify which segmentation methods are suitable for a particular application. For example, the definition of an action in (<xref ref-type="bibr" rid="B34">34</xref>) differs from (<xref ref-type="bibr" rid="B101">101</xref>), where (<xref ref-type="bibr" rid="B101">101</xref>) analyzes more complex activities. A recent partonomy-based activity recognition method proposed a general structure for categorizing human movements, which included the activity, sub-activity, and atomic action categories (<xref ref-type="bibr" rid="B102">102</xref>). These categories are comparable to the hierarchical levels adopted by (<xref ref-type="bibr" rid="B2">2</xref>) in their UE functional motion hierarchy, which includes activities, functional movements, and functional primitives. This review is targeted towards helping computational researchers better understand the kinematic analysis process for UEFAs and domain experts understand the tools that are available to them. Therefore, this review follows the terminology from the health literature and adopts the hierarchy from (<xref ref-type="bibr" rid="B2">2</xref>) to organize the reviewed segmentation approaches, with other researchers also recommending this hierarchy for segmentation (<xref ref-type="bibr" rid="B103">103</xref>).</p>
<p>The UE functional motion hierarchy (see <xref ref-type="table" rid="T3">Table&#x00A0;3</xref>) used in this paper has the following three levels: <italic>activities</italic>, such as eating dinner; <italic>functional movements</italic>, such as drinking water or tasting a spoonful of soup; and <italic>functional primitives</italic>, which are short and discrete movements, such as reaching, transport, grasping, stabilizing, idling, and repositioning (<xref ref-type="bibr" rid="B2">2</xref>). This hierarchy captures the idea that functional motions can be decomposed into different levels of motion with decreasing duration and complexity, with the more granular motions serving as building blocks for more complex motions.</p>
<table-wrap id="T3" position="float"><label>Table 3</label>
<caption><p>Upper extremity functional motion hierarchy (<xref ref-type="bibr" rid="B2">2</xref>).</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Hierarchy layer</th>
<th valign="top" align="left">Goals (i.e., tasks)</th>
<th valign="top" align="left">Duration</th>
<th valign="top" align="left">Examples</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Activities (broad, see Section <xref ref-type="sec" rid="s4c">4.3</xref>)</td>
<td valign="top" align="left">Many</td>
<td valign="top" align="left">Minutes to hours</td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM8"><mml:mo>&#x2219;</mml:mo></mml:math></inline-formula>&#x2002;</label><p>Cooking dinner</p></list-item>
<list-item><label><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM9"><mml:mo>&#x2219;</mml:mo></mml:math></inline-formula>&#x2002;</label><p>Bathing</p></list-item>
<list-item><label><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM10"><mml:mo>&#x2219;</mml:mo></mml:math></inline-formula>&#x2002;</label><p>Putting clothing on</p></list-item>
</list></td>
</tr>
<tr>
<td valign="top" align="left">Functional movements<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM11"><mml:msup><mml:mi></mml:mi><mml:mi>a</mml:mi></mml:msup></mml:math></inline-formula> (see Section <xref ref-type="sec" rid="s4d">4.4</xref>)</td>
<td valign="top" align="left">Few</td>
<td valign="top" align="left">Seconds</td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM12"><mml:mo>&#x2219;</mml:mo></mml:math></inline-formula>&#x2002;</label><p>Tasting sauce</p></list-item>
<list-item><label><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM13"><mml:mo>&#x2219;</mml:mo></mml:math></inline-formula>&#x2002;</label><p>Putting arm through sleeve</p></list-item>
<list-item><label><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM14"><mml:mo>&#x2219;</mml:mo></mml:math></inline-formula>&#x2002;</label><p>Zipping up jacket</p></list-item>
<list-item><label><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM15"><mml:mo>&#x2219;</mml:mo></mml:math></inline-formula>&#x2002;</label><p>Tying shoelace</p></list-item>
</list></td>
</tr>
<tr>
<td valign="top" align="left">Functional primitives (granular, see Section <xref ref-type="sec" rid="s4e">4.5</xref>)</td>
<td valign="top" align="left">One</td>
<td valign="top" align="left">Sub-seconds to seconds</td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM16"><mml:mo>&#x2219;</mml:mo></mml:math></inline-formula>&#x2002;</label><p>Reach</p></list-item>
<list-item><label><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM17"><mml:mo>&#x2219;</mml:mo></mml:math></inline-formula>&#x2002;</label><p>Reposition</p></list-item>
<list-item><label><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM18"><mml:mo>&#x2219;</mml:mo></mml:math></inline-formula>&#x2002;</label><p>Grasp</p></list-item>
<list-item><label><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM19"><mml:mo>&#x2219;</mml:mo></mml:math></inline-formula>&#x2002;</label><p>Transport</p></list-item>
<list-item><label><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM20"><mml:mo>&#x2219;</mml:mo></mml:math></inline-formula>&#x2002;</label><p>Stabilize</p></list-item>
<list-item><label><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM21"><mml:mo>&#x2219;</mml:mo></mml:math></inline-formula>&#x2002;</label><p>Idle</p></list-item>
</list></td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn id="table-fn2"><p><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM22"><mml:msup><mml:mi></mml:mi><mml:mi>a</mml:mi></mml:msup></mml:math></inline-formula>Has also been referred to as actions (<xref ref-type="bibr" rid="B2">2</xref>).</p></fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s4a3"><label>4.1.3.</label><title>Necessity of different segmentation levels</title>
<p>Suppose a rehabilitation specialist is interested in evaluating the kinematics associated with how individuals make a salad within a standardized setup (see <xref ref-type="fig" rid="F3">Figure&#x00A0;3</xref>). The activity is known&#x2014;making a salad&#x2014;but there are multiple tasks an individual must do, such as grabbing a bottle of vinegar for the dressing and cutting a tomato. Ideally, these different tasks would be segmented so that kinematic measures can be used for comparison for the same task, either across groups or over time. One option is for the clinician to do this manually, but that is time consuming. Another option is to use algorithms that automatically identify these different tasks. These algorithms address the problem of <bold>functional movement segmentation</bold>.</p>
<fig id="F3" position="float"><label>Figure 3</label>
<caption><p>Single frame from the 50 Salads data set (<xref ref-type="bibr" rid="B104">104</xref>) (license under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License: <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by-nc-sa/4.0/">https://creativecommons.org/licenses/by-nc-sa/4.0/</ext-link>), which required individuals to make salads. Data includes RGB-D video, accelerometer data from utensils, and functional movement labels.</p></caption>
<graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fresc-04-1130847-g003.tif"/>
</fig>
<p>Suppose the tasks have now been segmented and the clinician would like to analyze the functional primitive kinematics of how individuals reach for and grasp the bottle of vinegar (e.g., some kinematic analyses require primitive segmentation (<xref ref-type="bibr" rid="B97">97</xref>, <xref ref-type="bibr" rid="B105">105</xref>, <xref ref-type="bibr" rid="B106">106</xref>)). This reaching motion consists of multiple functional primitives (see <xref ref-type="table" rid="T3">Table&#x00A0;3</xref>) and would need to be segmented. Again, one option would be to manually segment the primitives (<xref ref-type="bibr" rid="B48">48</xref>). However, there are algorithms that focus specifically on automating <bold>functional primitive segmentation</bold> (e.g., (<xref ref-type="bibr" rid="B26">26</xref>, <xref ref-type="bibr" rid="B106">106</xref>)). Given the distinction between the algorithms for segmenting functional movements and primitives, one section is dedicated to each in this review. Furthermore, functional movement segmentation algorithms are usually disjoint from functional primitive segmentation algorithms.</p>
<sec id="s4a3a"><label>4.1.3.1.</label><title>An exception</title>
<p>Although movement segmentation is widely done in the health and computational literature, e.g., some measures of movement smoothness require it (<xref ref-type="bibr" rid="B105">105</xref>), there are some examples of computational approaches that skip movement segmentation altogether (<xref ref-type="bibr" rid="B51">51</xref>, <xref ref-type="bibr" rid="B52">52</xref>, <xref ref-type="bibr" rid="B107">107</xref>). Kinematic measures of surgical skill from the entirety of each surgical training task have been used (<xref ref-type="bibr" rid="B51">51</xref>, <xref ref-type="bibr" rid="B52">52</xref>), i.e., motions during tasks were not segmented. Similarly, (<xref ref-type="bibr" rid="B107">107</xref>) proposed a surgical skill evaluation approach that explicitly does not require segmentation.</p>
</sec>
</sec>
</sec>
<sec id="s4b"><label>4.2.</label><title>Data sets</title>
<p><xref ref-type="table" rid="T4">Table&#x00A0;4</xref> includes publicly available data sets with labeled activity and functional UE movements, although most of these data sets also include non-UE motion. These data sets have sequences with potentially multiple segment classes, requiring temporal segmentation. We are not aware of publicly available data sets with labeled motions found in clinically validated UEFAs, which are necessary for reducing the burden associated with the UE kinematic analysis workflow. Lin (<xref ref-type="bibr" rid="B108">108</xref>) identified rehabilitation-focused data sets for the UE and lower extremity. However, the only rehabilitation-focused UE functional motion data set (<xref ref-type="bibr" rid="B109">109</xref>) could not be found online. Zhang et al. (<xref ref-type="bibr" rid="B99">99</xref>) and Hu et al. (<xref ref-type="bibr" rid="B100">100</xref>) include state-of-the-art action detection performance measures for a variety of data sets, and most of the referenced data sets in <xref ref-type="table" rid="T4">Table&#x00A0;4</xref> include benchmark performance results using supervised and unsupervised approaches.</p>
<table-wrap id="T4" position="float"><label>Table 4</label>
<caption><p>Publicly available activity and functional motion data sets with segment labels.</p></caption>
<table frame="hsides" rules="groups">
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Data set</th>
<th valign="top" align="left">Topic</th>
<th valign="top" align="left">Task(s)</th>
<th valign="top" align="left">Participants</th>
<th valign="top" align="left">System</th>
<th valign="top" align="left">Labels</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">JIGSAWS (<xref ref-type="bibr" rid="B91">91</xref>)</td>
<td valign="top" align="left">Surgical activity</td>
<td valign="top" align="left">Suturing, knot-tying, needle passing</td>
<td valign="top" align="left">8 surgeons of varying skill</td>
<td valign="top" align="left">Robotic kinematics; stereo video</td>
<td valign="top" align="left">Surgical skill; functional movements and primitives</td>
</tr>
<tr>
<td valign="top" align="left">50 Salads (<xref ref-type="bibr" rid="B104">104</xref>)</td>
<td valign="top" align="left">ADL<inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM23"><mml:msup><mml:mi></mml:mi><mml:mi>a</mml:mi></mml:msup></mml:math></inline-formula></td>
<td valign="top" align="left">Food (salad) preparation</td>
<td valign="top" align="left">27 able-bodied individuals</td>
<td valign="top" align="left">Accelerometry from objects; RGB-D</td>
<td valign="top" align="left">Functional movements</td>
</tr>
<tr>
<td valign="top" align="left">Breakfast Actions (<xref ref-type="bibr" rid="B110">110</xref>)</td>
<td valign="top" align="left">ADL</td>
<td valign="top" align="left">Varying cooking tasks</td>
<td valign="top" align="left">52 able-bodied individuals</td>
<td valign="top" align="left">Markerless video capture</td>
<td valign="top" align="left">Activities and functional movements</td>
</tr>
<tr>
<td valign="top" align="left">EGTEA Gaze+ (<xref ref-type="bibr" rid="B111">111</xref>)</td>
<td valign="top" align="left">ADL</td>
<td valign="top" align="left">Varying cooking tasks</td>
<td valign="top" align="left">32 able-bodied individuals</td>
<td valign="top" align="left">Markerless video capture with gaze tracking</td>
<td valign="top" align="left">Functional movements</td>
</tr>
<tr>
<td valign="top" align="left">TUM Kitchen (<xref ref-type="bibr" rid="B112">112</xref>)</td>
<td valign="top" align="left">ADL</td>
<td valign="top" align="left">Object interaction</td>
<td valign="top" align="left">4 able-bodied individuals</td>
<td valign="top" align="left">Markerless video capture; RFID on objects</td>
<td valign="top" align="left">Functional movements</td>
</tr>
<tr>
<td valign="top" align="left">UW IOM (<xref ref-type="bibr" rid="B113">113</xref>)</td>
<td valign="top" align="left">ADL</td>
<td valign="top" align="left">Object interaction</td>
<td valign="top" align="left">20 able-bodied individuals</td>
<td valign="top" align="left">Kinect RGB-D camera</td>
<td valign="top" align="left">Functional movements</td>
</tr>
<tr>
<td valign="top" align="left">LARa (<xref ref-type="bibr" rid="B114">114</xref>)</td>
<td valign="top" align="left">Logistics</td>
<td valign="top" align="left">Picking and packaging</td>
<td valign="top" align="left">14 able-bodied individuals</td>
<td valign="top" align="left">Marker and markerless video capture; IMUs</td>
<td valign="top" align="left">Activities and functional movements</td>
</tr>
<tr>
<td valign="top" align="left">CAARL (<xref ref-type="bibr" rid="B115">115</xref>)</td>
<td valign="top" align="left">Logistics</td>
<td valign="top" align="left">Picking and packaging</td>
<td valign="top" align="left">2 able-bodied individuals</td>
<td valign="top" align="left">Marker and markerless video capture on person and objects</td>
<td valign="top" align="left">Activities and functional movements</td>
</tr>
<tr>
<td valign="top" align="left">AVA-Kinetics (<xref ref-type="bibr" rid="B116">116</xref>)</td>
<td valign="top" align="left">Varied</td>
<td valign="top" align="left">Varied object and person interactions</td>
<td valign="top" align="left">Not reported (large data set)</td>
<td valign="top" align="left">Markerless video capture (YouTube)</td>
<td valign="top" align="left">Activities and functional movements</td>
</tr>
<tr>
<td valign="top" align="left">Something-Something V2 (<xref ref-type="bibr" rid="B117">117</xref>, <xref ref-type="bibr" rid="B118">118</xref>)</td>
<td valign="top" align="left">Varied</td>
<td valign="top" align="left">Object interaction</td>
<td valign="top" align="left">Not reported (large data set)</td>
<td valign="top" align="left">Markerless video capture (crowd sourced)</td>
<td valign="top" align="left">Activities and functional movements</td>
</tr>
<tr>
<td valign="top" align="left">HMDB51 (<xref ref-type="bibr" rid="B119">119</xref>)</td>
<td valign="top" align="left">Varied</td>
<td valign="top" align="left">Varied object and person interactions</td>
<td valign="top" align="left">Not reported (large data set)</td>
<td valign="top" align="left">Markerless video capture (YouTube, movies)</td>
<td valign="top" align="left">Activities and functional movements</td>
</tr>
<tr>
<td valign="top" align="left">UCF101 (<xref ref-type="bibr" rid="B120">120</xref>)</td>
<td valign="top" align="left">Varied</td>
<td valign="top" align="left">Object and human interaction; body motions</td>
<td valign="top" align="left">Not reported (large data set)</td>
<td valign="top" align="left">Markerless video capture (YouTube)</td>
<td valign="top" align="left">Activities and functional movements</td>
</tr>
<tr>
<td valign="top" align="left">MOMA (<xref ref-type="bibr" rid="B102">102</xref>)</td>
<td valign="top" align="left">Varied</td>
<td valign="top" align="left">Object and human interaction</td>
<td valign="top" align="left">Not reported (large data set)</td>
<td valign="top" align="left">Markerless video capture (YouTube)</td>
<td valign="top" align="left">Activities and functional movements</td>
</tr>
<tr>
<td valign="top" align="left">Action Genome (<xref ref-type="bibr" rid="B121">121</xref>)</td>
<td valign="top" align="left">Varied</td>
<td valign="top" align="left">Object interaction</td>
<td valign="top" align="left">Not reported (large data set)</td>
<td valign="top" align="left">Markerless video capture (Amazon Mechanical Turk)</td>
<td valign="top" align="left">Activities and functional movements</td>
</tr>
<tr>
<td valign="top" align="left">Ego4D (<xref ref-type="bibr" rid="B122">122</xref>)</td>
<td valign="top" align="left">Varied</td>
<td valign="top" align="left">Varied, including person and object interaction</td>
<td valign="top" align="left">923 participants from multiple countries</td>
<td valign="top" align="left">Egocentric RGB, IMUs, gaze, and audio</td>
<td valign="top" align="left">Activities and functional movements</td>
</tr>
<tr>
<td valign="top" align="left">BEHAVIOR-1K (<xref ref-type="bibr" rid="B123">123</xref>)</td>
<td valign="top" align="left">ADL</td>
<td valign="top" align="left">Object interaction</td>
<td valign="top" align="left">None (simulation)</td>
<td valign="top" align="left">Simulation</td>
<td valign="top" align="left">Activities</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn id="table-fn3"><p><inline-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML" id="IM24"><mml:msup><mml:mi></mml:mi><mml:mi>a</mml:mi></mml:msup></mml:math></inline-formula>Activities of daily living (ADL).</p></fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s4c"><label>4.3.</label><title>Activity segmentation</title>
<p>Activities are the highest level in the functional motion hierarchy (see <xref ref-type="table" rid="T3">Table&#x00A0;3</xref>). Activity recognition (<xref ref-type="bibr" rid="B35">35</xref>, <xref ref-type="bibr" rid="B36">36</xref>, <xref ref-type="bibr" rid="B102">102</xref>) is useful for assessments where individuals are being evaluated in their natural environments throughout the day, where it may be useful to automatically identify activities an individual is doing. At-home health monitoring is especially important to clinicians because improvement in clinical measures does not necessarily mean UE performance improvement in free-living and unstructured environments (<xref ref-type="bibr" rid="B49">49</xref>), where improvement in the latter is the goal.</p>
<p>Human activity recognition has received attention from the computational research community; however, many of the methods need labeled data, which are not rehabilitation specific (see <xref ref-type="table" rid="T4">Table&#x00A0;4</xref>). Additionally, activity recognition as part of UEFAs is a relatively undeveloped area. Inaccurate commodity measurement systems (e.g., wearable sensors), non-validated outcome measures, and human factors challenges are current barriers to use of data capture and analysis (<xref ref-type="bibr" rid="B86">86</xref>). Furthermore, current measures used for at-home UEFAs are not activity-specific and instead summarize different aspects of UE usage throughout the day (<xref ref-type="bibr" rid="B49">49</xref>, <xref ref-type="bibr" rid="B103">103</xref>). Activity recognition methods are useful for at-home UEFAs (<xref ref-type="bibr" rid="B84">84</xref>, <xref ref-type="bibr" rid="B103">103</xref>), but their utility is not as well defined compared to the automated segmentation of functional movements and primitives (see <xref ref-type="table" rid="T3">Table&#x00A0;3</xref>). This paper does not thoroughly review human activity recognition methods due to these aforementioned issues. Additionally, the activities performed during UEFAs, which are most commonly done in clinics, research labs, or as part of job-related assessments and training, are pre-defined to include only the activities of interest.</p>
</sec>
<sec id="s4d"><label>4.4.</label><title>Functional movement segmentation</title>
<p>Functional movement segmentation approaches, also known as human action detection in the computational literature (<xref ref-type="bibr" rid="B98">98</xref>&#x2013;<xref ref-type="bibr" rid="B100">100</xref>), typically use supervised or unsupervised learning. Progress in functional movement segmentation algorithms for UE functional motions has benefited from the availability of labeled data sets. Algorithmic development has therefore been largely focused on these well-annotated data sets because it is easier to compare and evaluate algorithms.</p>
<sec id="s4d1"><label>4.4.1.</label><title>Supervised learning</title>
<p>Combined segmentation and classification has been approached from a supervised learning perspective using only kinematic data (<xref ref-type="bibr" rid="B124">124</xref>, <xref ref-type="bibr" rid="B125">125</xref>), kinematic and video data together (<xref ref-type="bibr" rid="B124">124</xref>, <xref ref-type="bibr" rid="B125">125</xref>), or video data alone (<xref ref-type="bibr" rid="B124">124</xref>&#x2013;<xref ref-type="bibr" rid="B126">126</xref>). These approaches have the goal of densely labeling all time-steps in the sequential data with functional movement class out of multiple classes. This differs from computational methods that assume the start and end points of the segments are given (<xref ref-type="bibr" rid="B127">127</xref>, <xref ref-type="bibr" rid="B128">128</xref>), thereby reducing the problem to simply classifying the given segments. However, this is not a reasonable assumption for real-world UEFA use cases. Additionally, kinematic information alone has primarily been used in the health literature to segment movement (<xref ref-type="bibr" rid="B48">48</xref>), whereas contextual features related to the objects being manipulated (e.g., distance from hand to nearest object) have been used for segmenting surgical motions (<xref ref-type="bibr" rid="B125">125</xref>). These supervised learning algorithms may also work for a variety of functional movements if labeled data are available, as is done in (<xref ref-type="bibr" rid="B125">125</xref>).</p>
</sec>
<sec id="s4d2"><label>4.4.2.</label><title>Unsupervised learning</title>
<p>Unsupervised approaches to movement segmentation do not require ground truth labels for training but tend to assume that there are repeated patterns in the movements (<xref ref-type="bibr" rid="B129">129</xref>, <xref ref-type="bibr" rid="B130">130</xref>). These methods can also use a variety of data sources, such as only the end effector kinematics (<xref ref-type="bibr" rid="B131">131</xref>) or both kinematic and video features (<xref ref-type="bibr" rid="B129">129</xref>) for robotic surgery motion segmentation. More general unsupervised segmentation approaches can also use the whole body pose (i.e., multiple anatomical landmarks) (<xref ref-type="bibr" rid="B130">130</xref>).</p>
</sec>
</sec>
<sec id="s4e"><label>4.5.</label><title>Functional primitive segmentation</title>
<p>Lin et al. (<xref ref-type="bibr" rid="B48">48</xref>) provides an organizing framework for functional primitive segmentation, which includes online and offline methods. A variety of approaches are reviewed in (<xref ref-type="bibr" rid="B48">48</xref>) for general primitive segmentation (i.e., includes full-body primitives and gesture recognition) that apply to a variety of tasks (i.e., many UE functional motions require reaching, grasping, etc.). This section focuses specifically on methods for UE functional motion, either of the UE or an end effector.</p>
<p>Feature vector thresholds and zero-crossings (<xref ref-type="bibr" rid="B48">48</xref>) work well for simple actions and small data sets that allow researchers to visually verify the movement segments. Engdahl and Gates (<xref ref-type="bibr" rid="B97">97</xref>) segmented functional UE movement during pre-defined activities of daily living (ADLs) into reaching and object manipulation phases using a fixed-velocity magnitude threshold. Cowley et al. (<xref ref-type="bibr" rid="B12">12</xref>) and Engdahl and Gates (<xref ref-type="bibr" rid="B15">15</xref>) evaluated UE prosthesis users compared to able-bodied individuals on a set of standardized ADLs and segmented the movement primitives using pre-defined velocity magnitude thresholds. Li et al. (<xref ref-type="bibr" rid="B132">132</xref>) accounted for differences in participant kinematics while transporting objects by selecting 50&#x0025; of movement time as when the hand reached a target position.</p>
<p>Approaches that use thresholds and zero-crossing tend not to perform well with complex functional movements (<xref ref-type="bibr" rid="B48">48</xref>), particularly for reaching motions (<xref ref-type="bibr" rid="B133">133</xref>). Some measurement systems allow for the collection of events (i.e., additional context about what the individual is doing, such as making contact with objects) in addition to kinematics, such as in haptic virtual environments. These events can be used to indicate action segments, e.g., person grasped object, person released object (<xref ref-type="bibr" rid="B26">26</xref>). Jackson et al. (<xref ref-type="bibr" rid="B26">26</xref>) showed that primitive segmentation, such as reaching and grasping, using a fixed-velocity magnitude threshold can result in incorrect primitive segments, requiring more robust computational approaches. To remedy this, (<xref ref-type="bibr" rid="B26">26</xref>) proposed a movement primitive segmentation approach that uses distance from the object and event recordings to segment reaching from object manipulation. This method has since been used to segment the reach and dwell primitives of pen point trajectories during the Trail Making Test to assess cognitive function (<xref ref-type="bibr" rid="B106">106</xref>).</p>
<p>Additional approaches to segmenting UE movement primitives include using 2D hand trajectories for identifying different hand-drawn shapes by segmenting the trajectory into strokes based on large changes in the angle between line segments and the horizontal axis (<xref ref-type="bibr" rid="B134">134</xref>). Motivated by robotic imitation learning, visual information, specifically kinematics derived from the Kinect pose estimation software, has been used to segment functional UE movements into reaching, manipulation, and release (<xref ref-type="bibr" rid="B135">135</xref>).</p>
</sec>
<sec id="s4f"><label>4.6.</label><title>Evaluating segmentation performance</title>
<p>Given labeled data sets for both functional movements and primitives, segmentation evaluation measures include accuracy, precision, recall, overlap between ground truth and predicted segment classes, and the ordering of predicted segments (<xref ref-type="bibr" rid="B48">48</xref>, <xref ref-type="bibr" rid="B98">98</xref>, <xref ref-type="bibr" rid="B125">125</xref>). Unsupervised and supervised functional primitive segmentation algorithms can use the same data sets for evaluation (e.g., as has been done with JIGSAWS (<xref ref-type="bibr" rid="B91">91</xref>)). However, due to challenges associated with creating ground truth labels for functional primitives, verification of temporal segmentation results is limited (<xref ref-type="bibr" rid="B48">48</xref>). One of the major challenges with acquiring data sets of motion primitives is that it is still unclear what separates the different primitive phases using kinematics alone, especially given variations in pathologies, impairments, and movement strategies. Similarly, functional movement labels are not reliably identified across raters (<xref ref-type="bibr" rid="B98">98</xref>). Additionally, many of the available data sets focus on healthy, able-bodied populations, which may not properly indicate whether a segmentation approach will generalize to populations of interest to domain experts (<xref ref-type="bibr" rid="B48">48</xref>).</p>
</sec>
</sec>
<sec id="s5"><label>5.</label><title>Description and analysis</title>
<p>The description and analysis phase (see definitions in <xref ref-type="table" rid="T1">Table&#x00A0;1</xref>) of the kinematic analysis process (see <xref ref-type="fig" rid="F1">Figure&#x00A0;1</xref>) converts the measured and segmented kinematic data into a format usable by a domain expert to inform their decisions about treatment or interventions. The measurement accuracy and segmentation requirements for specific descriptions and analyses inform what measurement and segmentation methods are suitable for use. Besides these requirements, descriptions and analyses are not necessarily tied to specific measurement and segmentation approaches.</p>
<p>Common kinematic descriptions of UE functional motion include plots of kinematics for a single point (<xref ref-type="bibr" rid="B12">12</xref>, <xref ref-type="bibr" rid="B13">13</xref>, <xref ref-type="bibr" rid="B26">26</xref>, <xref ref-type="bibr" rid="B97">97</xref>, <xref ref-type="bibr" rid="B127">127</xref>), e.g., position trajectory and velocity magnitude of the wrist. More advanced visualizations include plotting joint angle time series during functional movements (<xref ref-type="bibr" rid="B17">17</xref>&#x2013;<xref ref-type="bibr" rid="B21">21</xref>), highlighting compensatory UE motions (<xref ref-type="bibr" rid="B136">136</xref>), and visualizations of UE function and activity in free-living environments (<xref ref-type="bibr" rid="B103">103</xref>). A comprehensive review of functional UE motion descriptions and analyses is beyond the scope of this paper, with multiple reviews and studies of kinematic analyses already published for UE movements after stroke (<xref ref-type="bibr" rid="B16">16</xref>), UE functional impairment measures (<xref ref-type="bibr" rid="B8">8</xref>), handwriting (<xref ref-type="bibr" rid="B42">42</xref>, <xref ref-type="bibr" rid="B95">95</xref>), and quantifying laparoscopic surgical skill (<xref ref-type="bibr" rid="B51">51</xref>, <xref ref-type="bibr" rid="B52">52</xref>, <xref ref-type="bibr" rid="B107">107</xref>). Instead, we note a few directions where computer-assisted methods could support the description and analysis phase.</p>
<sec id="s5a"><label>5.1.</label><title>Automating existing measures vs. creating new ones</title>
<p>Given that many existing clinical measures are already validated and well-known by rehabilitation professionals (<xref ref-type="bibr" rid="B41">41</xref>), automating these measures may offer additional benefit because clinicians are already familiar with them and would benefit from potential resource or time savings. For example, (<xref ref-type="bibr" rid="B137">137</xref>) used machine learning to infer clinically validated scores of UE motor impairment and movement quality in stroke and traumatic brain injury survivors using wearable sensor data. Barth et al. (<xref ref-type="bibr" rid="B138">138</xref>) evaluated a method for predicting the UE functional capacity, as defined by the Action Research Arm Test score, of individuals with first-ever stroke using early clinical measures and participant age.</p>
<p>Development, evaluation, and automation of currently non-validated measures, such as some that use kinematic data, should also continue in parallel to automating the output of validated measures. For instance, clinically relevant gait parameters (e.g., walking speed, cadence) and validated gait measures (e.g., the Gait Deviation Index and the Gross Motor Function Classification System score) have been inferred from 2D keypoint human pose estimates using a single RGB camera (<xref ref-type="bibr" rid="B79">79</xref>); a methodology which could apply to UE functional motion. A UE-specific example is the development of a kinematic-based quantitative measure of UE movement quality post-stroke from motions performed during two widely used qualitative assessments, where the quantitative measure was found to be strongly correlated with the qualitative assessment results (<xref ref-type="bibr" rid="B83">83</xref>). Similarly, a measure of movement quality from UE kinematics of individuals with chronic stroke symptoms captured during a rehabilitation game was evaluated against established UEFAs (<xref ref-type="bibr" rid="B84">84</xref>).</p>
<p>Note that kinematic descriptions and analyses tend to be explainable and expert-derived, e.g., in contrast with representation learning. This is largely due to intervention decisions being the responsibility of a human that must be able to interpret the data. However, this does not preclude the use of methods such as deep learning to help with analysis, as is the case with (<xref ref-type="bibr" rid="B79">79</xref>).</p>
</sec>
<sec id="s5b"><label>5.2.</label><title>Time series data mining</title>
<p>Time series data mining techniques have been successfully used for segmenting motions and analyzing skill in the robotic surgery setting (<xref ref-type="bibr" rid="B127">127</xref>, <xref ref-type="bibr" rid="B131">131</xref>, <xref ref-type="bibr" rid="B139">139</xref>&#x2013;<xref ref-type="bibr" rid="B141">141</xref>). Some of these methods have also been used for the analysis phase, such as converting trajectories to string representations (e.g., symbolic aggregate approximations (SAX) (<xref ref-type="bibr" rid="B127">127</xref>)) and comparing time series using a method called dynamic time warping (DTW) (<xref ref-type="bibr" rid="B131">131</xref>, <xref ref-type="bibr" rid="B139">139</xref>, <xref ref-type="bibr" rid="B140">140</xref>). DTW is useful because it allows the measurement of similarity between two time series with varying speeds. The motivation for these works is that surgical motion classes (e.g., grab needle, pull needle, rotate suture once (<xref ref-type="bibr" rid="B141">141</xref>)) and surgical skill levels follow distinctive patterns. While movement segmentation is often a focus of these works, the use of DTW represents a direction where kinematic measures are computed based on comparisons, as opposed to computing a measure from an individual&#x2019;s kinematics only. For example, (<xref ref-type="bibr" rid="B139">139</xref>) computed a score based on how the trajectories of the robotic instrument tips compared to &#x201C;optimal&#x201D; trajectories during a simulated surgical task. While it is unclear what an optimal trajectory would be in a clinical setting, the time series data mining techniques these surgical motion segmentation and skill evaluation methods use could be relevant for identifying patterns in functional UE motion on standardized tasks.</p>
</sec>
<sec id="s5c"><label>5.3.</label><title>Dimensionality reduction</title>
<p>As more measures are developed and validated, it is possible that for a particular functional motion there could be many measures used to describe it (<xref ref-type="bibr" rid="B49">49</xref>). Another research direction is to use computational methods for visualizing high dimensional data, such as using t-SNE (<xref ref-type="bibr" rid="B142">142</xref>), UMAP (<xref ref-type="bibr" rid="B143">143</xref>), or principal component analysis (<xref ref-type="bibr" rid="B49">49</xref>, <xref ref-type="bibr" rid="B83">83</xref>). Using dimensionality reduction techniques to project high dimensional data to two or three dimensions for plotting could be useful for seeing how the evaluated individual compares to others.</p>
</sec>
<sec id="s5d"><label>5.4.</label><title>Validation and standardization</title>
<p>The lack of validated and standardized kinematic-based outcome measures is a substantial barrier to more widespread usage of kinematics by domain experts (<xref ref-type="bibr" rid="B16">16</xref>). Although domain-specific researchers are likely better positioned to address this problem, computer-assisted tools that make descriptions and analyses easier to acquire will enable a wider group of domain-specific researchers to develop and evaluate kinematic-based outcome measures.</p>
</sec>
</sec>
<sec id="s6"><label>6.</label><title>Assessment and interpretation</title>
<p>Following the definition in <xref ref-type="table" rid="T1">Table&#x00A0;1</xref>, this phase involves the assessment and interpretation of kinematic-based outcome measures to inform decisions about training or clinical interventions. At this stage in the workflow depicted in <xref ref-type="fig" rid="F1">Figure&#x00A0;1</xref>, the data have been measured, segmented according to the needs of a particular analysis, and descriptions or analyses have been computed. Staying within this review&#x2019;s scope of developing computer-assisted tools to better support the kinematics analysis process, two areas are considered: (1) automating the assessment and interpretation of kinematic measures, and (2) making descriptions and analyses from the previous workflow stage available to domain experts for interpretation.</p>
<sec id="s6a"><label>6.1.</label><title>Automating assessment and interpretation</title>
<p>Although adoption of kinematic analyses is currently limited, researchers have recently used machine learning and artificial intelligence to automate aspects of the interpretation and assessment process (<xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B42">42</xref>, <xref ref-type="bibr" rid="B52">52</xref>, <xref ref-type="bibr" rid="B94">94</xref>, <xref ref-type="bibr" rid="B95">95</xref>, <xref ref-type="bibr" rid="B107">107</xref>, <xref ref-type="bibr" rid="B144">144</xref>&#x2013;<xref ref-type="bibr" rid="B146">146</xref>). Whereas machine learning in the previous section is used to output outcome measures that a domain expert would interpret as part of their decision-making process, the methods considered here automatically output an assessment (e.g., the presence of a disease) based on input kinematic-based outcome measures (see Section <xref ref-type="sec" rid="s5">5</xref>). Pereira et al. (<xref ref-type="bibr" rid="B144">144</xref>) provides a systematic review of machine learning approaches and data sets for inferring the diagnosis of Parkinson&#x2019;s Disease using kinematic measurements, among other data sources. Classification models trained on kinematic features have been used to predict the skill level of laparoscopic surgeons (<xref ref-type="bibr" rid="B52">52</xref>, <xref ref-type="bibr" rid="B107">107</xref>). Handwriting on consumer tablets has been used for automated diagnosis of dysgraphia (<xref ref-type="bibr" rid="B95">95</xref>) and neurological disease (<xref ref-type="bibr" rid="B42">42</xref>, <xref ref-type="bibr" rid="B94">94</xref>, <xref ref-type="bibr" rid="B145">145</xref>, <xref ref-type="bibr" rid="B146">146</xref>).</p>
</sec>
<sec id="s6b"><label>6.2.</label><title>Interfacing with kinematic measures</title>
<p>How domain experts physically interface with kinematic-based outcome measures, either from UEFAs or during free living, has also been studied from the perspective of human-centered design (<xref ref-type="bibr" rid="B147">147</xref>, <xref ref-type="bibr" rid="B148">148</xref>). These outcome measures are just one of a variety of inputs domain experts use in their assessments, necessitating consideration of how to integrate these various inputs into a system easily used by domain experts. For example, a 2020 survey on requirements for a post-stroke UE rehabilitation mobile application showed that rehabilitation clinicians in the United States and Ethiopia valued the ability to record video of UE function, automatically update performance measures, graphically display patient performance in a number of factors, and see current quality of life and pain levels, among other desired features (<xref ref-type="bibr" rid="B147">147</xref>). Similarly, in (<xref ref-type="bibr" rid="B148">148</xref>), rehabilitation clinicians qualitatively evaluated a prototype dashboard that visualized UE movement information in stroke patients. The dashboard was then revised based on their feedback and presented in (<xref ref-type="bibr" rid="B148">148</xref>). User studies like these will be essential to successfully integrating kinematics analyses into domain expert workflows.</p>
</sec>
</sec>
<sec id="s7"><label>7.</label><title>Outlook</title>
<sec id="s7a"><label>7.1.</label><title>Measurement</title>
<p>A variety of tools are available for measuring kinematics, from low-cost optical and wearable sensors to high-cost optical motion capture systems. Although not offering the same accuracy as optical motion capture, low-cost sensors and pose estimation algorithms provide an opportunity for wider usage of kinematics for specific applications, primarily for measuring gross movements (<xref ref-type="bibr" rid="B71">71</xref>, <xref ref-type="bibr" rid="B83">83</xref>, <xref ref-type="bibr" rid="B84">84</xref>). Integration of these more flexible systems will depend on developing more accurate human pose estimation methods that generalize to populations of interest to domain experts (<xref ref-type="bibr" rid="B17">17</xref>), measure relevant quantities for the particular domain, e.g., 3D pose and joint angles (<xref ref-type="bibr" rid="B38">38</xref>), and are shown to be reliable, responsive, and valid (<xref ref-type="bibr" rid="B9">9</xref>, <xref ref-type="bibr" rid="B86">86</xref>). Wider adoption of low-cost measurement sensors depends on whether the data from these systems combined with specific kinematic UEFAs are demonstrated to be valid and reliable, as is done in (<xref ref-type="bibr" rid="B83">83</xref>, <xref ref-type="bibr" rid="B84">84</xref>). Alternatively, kinematic UEFAs that indicate an acceptable measurement error range could help identify what movement measurement approaches to use in practice. Ease-of-use is another barrier to using markerless pose estimation methods more widely. Some works have developed software packages that are more accessible (e.g., two or more smartphones can be used, user-friendly application) outside of the laboratory, while also incorporating methodological modifications to improve kinematic quantities (e.g., body models) (<xref ref-type="bibr" rid="B62">62</xref>). 
Ease-of-use may also be why the movement science community has frequently used the Kinect for markerless motion capture (see section <xref ref-type="sec" rid="s3b2a">3.2.2.1</xref>), where the Kinect has a built-in pose estimation capability accessible via a relatively simple application programming interface (API).</p>
</sec>
<sec id="s7b"><label>7.2.</label><title>Movement segmentation</title>
<p>A variety of methods exist for movement segmentation, which could help automate the processing of data before analysis and interpretation by a domain expert. Movement segmentation and measurement represent the most costly and burdensome parts of the kinematic analysis workflow for UEFAs that computer-assisted methods could help address. Current segmentation workflows used by researchers will not scale to the volume of data expected as kinematic measurements and analyses become more prevalent outside the laboratory. Automated segmentation approaches, along with improved measurement approaches, will enable more widespread kinematic data capture and processing, especially in unconstrained natural environments, e.g., at home. More accessible kinematic data capture and segmentation would give a wider range of domain experts access to kinematics analyses to support the further validation and standardization (<xref ref-type="bibr" rid="B16">16</xref>) of kinematics-based outcome measures and the administration of measures that have been validated.</p>
<p>In addition to the outstanding problems of algorithm generalizability and the general lack of algorithm verification due to difficulties with acquiring labeled data (<xref ref-type="bibr" rid="B48">48</xref>), motion hierarchies (see <xref ref-type="table" rid="T3">Table&#x00A0;3</xref>) tend to be inconsistently defined. Although there appears to be agreement across the computational and health literature that there are at least three levels of motion (<xref ref-type="bibr" rid="B2">2</xref>, <xref ref-type="bibr" rid="B102">102</xref>), different names for these levels could be confusing to domain experts and limit their application. Consensus on a functional motion hierarchy amongst computational researchers and domain experts will be necessary for segmentation algorithm development. Standardization of a functional motion hierarchy will help researchers curate more relevant data sets, where those data sets will be essential to further development of segmentation algorithms for evaluation and learning-based algorithms. The lack of relevant, rehabilitation-focused data sets that follow a standardized motion hierarchy needs considerable attention by the research community, where the requirements for those data sets will require expertise from both computational researchers and domain experts.</p>
</sec>
<sec id="s7c"><label>7.3.</label><title>Description and analysis</title>
<p>The validation of kinematic measures is essential to more widespread usage. However, a valid kinematic measure computed using a specific measurement and segmentation approach may not be valid using another type of measurement and segmentation approach, making it difficult to generalize kinematic measures that have not been validated with a particular set of measurement and segmentation methods (<xref ref-type="bibr" rid="B9">9</xref>). Furthermore, we believe that computing the kinematic descriptions and analyses themselves is not a burdensome aspect of the kinematic analysis process if the kinematic data are accurate (section <xref ref-type="sec" rid="s3">3</xref>) and properly segmented (section <xref ref-type="sec" rid="s4">4</xref>).</p>
<p>Developing methods to more easily measure and output existing validated clinical measures appears to be a valuable direction to pursue because of familiarity and existing use by clinicians (<xref ref-type="bibr" rid="B41">41</xref>). In addition to existing measures, developing methods to better measure kinematics, compute kinematic outcome measures, and validate them should be pursued in parallel. An approach to developing and evaluating new quantitative measures from kinematics is to compare the kinematics-based measure to currently used assessments that have been demonstrated to be valid and reliable (<xref ref-type="bibr" rid="B83">83</xref>, <xref ref-type="bibr" rid="B84">84</xref>). In healthcare, improving the quality of outcome measures and making the assessments easier to administer are important for patient outcomes and documentation. Outcomes research is used to understand the effectiveness of health services and interventions, or <italic>outcomes</italic>, necessitating outcome measures that are both valid and reliable (<xref ref-type="bibr" rid="B149">149</xref>). Furthermore, the need for repeated assessments to inform interventions throughout the rehabilitation cycle (<xref ref-type="bibr" rid="B150">150</xref>) necessitates easily acquired and sensitive movement quality measures (<xref ref-type="bibr" rid="B9">9</xref>).</p>
</sec>
<sec id="s7d"><label>7.4.</label><title>Assessment and interpretation</title>
<p>There is currently no consensus on how domain experts should use kinematic measures (<xref ref-type="bibr" rid="B9">9</xref>). Additionally, it is currently not known how computer-aided assessments or diagnosis (<xref ref-type="bibr" rid="B42">42</xref>) would best integrate into the kinematic analysis workflow used by a domain expert beyond use as a screening tool because of potential biases in the data (e.g., cultural and impairment variations), small reference data sets, and limited data on whether automated systems actually improve health outcomes (<xref ref-type="bibr" rid="B39">39</xref>, <xref ref-type="bibr" rid="B41">41</xref>). Although there has been success in automating aspects of robot-assisted surgical skill assessment and handwriting analysis, it is unlikely that domain experts, especially clinicians, will be replaced with fully autonomous systems responsible for deciding on interventions (<xref ref-type="bibr" rid="B42">42</xref>). Instead, a potentially more tractable approach is for computer-assisted methods to be designed to assist domain-experts in making decisions by providing more objective information (<xref ref-type="bibr" rid="B138">138</xref>).</p>
<p>Integrating artificial intelligence and autonomous systems (<xref ref-type="bibr" rid="B41">41</xref>) into domain expert processes is challenging and raises questions about reliability, trust, generalizability, and how domain experts and individuals can interface with the autonomous system. McDermott et al. (<xref ref-type="bibr" rid="B151">151</xref>) provides a framework for interviewing domain experts and establishing requirements that can enable an effective human-autonomy partnership. System-level user requirement studies can also inform the integration process (<xref ref-type="bibr" rid="B147">147</xref>, <xref ref-type="bibr" rid="B148">148</xref>). Additionally, cognitive systems engineering research could be an area that provides valuable quantitative evaluations on how computational tools integrate into domain expert workflows, such as the recently proposed joint activity testing framework (<xref ref-type="bibr" rid="B152">152</xref>). The need for more user-friendly kinematics measurement, segmentation, and analysis methods, as well as investigating how to integrate kinematic analyses into domain expert workflows (i.e., human factors), underscores the multidisciplinary approach required to meaningfully improve the quality and administration of UEFAs.</p>
</sec>
</sec>
<sec id="s8" sec-type="conclusions"><label>8.</label><title>Conclusion</title>
<p>Computer-assisted methods could serve an important role in improving outcomes by making kinematic measurement and analysis for UEFAs more accessible and cost-effective, especially for usage in clinics and one&#x2019;s natural environment. Markerless optical motion capture and automated segmentation algorithms are recent developments that may alleviate some of the most burdensome aspects of the kinematic analysis workflow. However, additional improvements are still needed, along with studies of validity, reliability, explainability, and generalizability for domain-specific UE applications. Better computer-assisted tools for kinematics analysis may also support the further development and evaluation of kinematics-based outcome measures by giving domain experts greater access to kinematics data and analysis tools. Furthermore, how best to incorporate kinematic analyses in domain expert workflows in a way that improves health or job-related outcomes remains an open problem. As evidenced by the wide-ranging reach of this review, interdisciplinary collaboration will be critical to developing computational tools that meaningfully support the kinematic analysis process for evaluating functional UE movement.</p>
</sec>
</body>
<back>
<sec id="s9"><title>Author contributions</title>
<p>All authors contributed to the conception and design of this study. KJ wrote the first draft of the manuscript. All authors revised the manuscript and approved the final version. All authors contributed to the article and approved the submitted version.</p>
</sec>
<sec id="s10" sec-type="funding-information"><title>Funding</title>
<p>This work was supported in part by the MITRE Corporation, McLean, Virginia. &#x00A9;2021 The MITRE Corporation. All rights reserved. Approved for public release. Distribution unlimited 22-00149-1. The funder was not involved in the study design, collection, analysis, interpretation of data, the writing of this article or the decision to submit it for publication.</p>
</sec>
<ack><title>Acknowledgments</title>
<p>Thank you to Meredith Hermann for helpful comments and resources regarding upper extremity functional assessments.</p>
</ack>
<sec id="s11" sec-type="COI-statement"><title>Conflict of interest</title>
<p>This study received funding from the MITRE Corporation. The funder had the following involvement with the study: none; funding was to support K. Jackson&#x2019;s time doing research as part of graduate school and was not directly involved in this study. Author K. Jackson was employed by the MITRE Corporation during the writing of this paper, author A. Santago is currently employed by the MITRE Corporation, S. DeStefano is employed by Optimal Motion, and L. Gerber is employed by the Inova Health System. The remaining authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s12" sec-type="disclaimer"><title>Publisher&#x0027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list><title>References</title>
<ref id="B1"><label>1.</label><citation citation-type="book"><person-group person-group-type="author"><name><surname>McKinley</surname><given-names>MP</given-names></name><name><surname>O&#x2019;Loughlin</surname><given-names>VD</given-names></name><name><surname>Bidle</surname><given-names>TS</given-names></name><name><surname>York</surname><given-names>J</given-names></name></person-group>, <source>Anatomy &#x0026; physiology: an integrative approach</source>. <edition>3 ed</edition>. <publisher-loc>New York (NY)</publisher-loc>: <publisher-name>McGraw-Hill Education</publisher-name> (<year>2016</year>), <comment>chap. 1.4d, 13</comment>.</citation></ref>
<ref id="B2"><label>2.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schambra</surname><given-names>HM</given-names></name><name><surname>Parnandi</surname><given-names>AR</given-names></name><name><surname>Pandit</surname><given-names>NG</given-names></name><name><surname>Uddin</surname><given-names>J</given-names></name><name><surname>Wirtanen</surname><given-names>A</given-names></name><name><surname>Nilsen</surname><given-names>DM</given-names></name></person-group>. <article-title>A taxonomy of functional upper extremity motion</article-title>. <source>Front Neurol</source>. (<year>2019</year>) <volume>10</volume>:<fpage>857</fpage>. <pub-id pub-id-type="doi">10.3389/fneur.2019.00857</pub-id><pub-id pub-id-type="pmid">31481922</pub-id></citation></ref>
<ref id="B3"><label>3.</label><citation citation-type="book"><person-group person-group-type="author"><name><surname>Lundy-Ekman</surname><given-names>L</given-names></name></person-group>. <source>Neuroscience: fundamentals for rehabilitation</source>. <edition>5 ed</edition>. <publisher-loc>St. Louis (MO)</publisher-loc>: <publisher-name>Elsevier Health Sciences</publisher-name> (<year>2018</year>). p. <fpage>198</fpage>&#x2013;<lpage>9</lpage>, <comment>242&#x2013;289, 319&#x2013;321</comment>.</citation></ref>
<ref id="B4"><label>4.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Metcalf</surname><given-names>C</given-names></name><name><surname>Adams</surname><given-names>J</given-names></name><name><surname>Burridge</surname><given-names>J</given-names></name><name><surname>Yule</surname><given-names>V</given-names></name><name><surname>Chappell</surname><given-names>P</given-names></name></person-group>. <article-title>A review of clinical upper limb assessments within the framework of the who ICF</article-title>. <source>Musculoskeletal Care</source>. (<year>2007</year>) <volume>5</volume>:<fpage>160</fpage>&#x2013;<lpage>73</lpage>. <pub-id pub-id-type="doi">10.1002/msc.108</pub-id><pub-id pub-id-type="pmid">17610309</pub-id></citation></ref>
<ref id="B5"><label>5.</label><citation citation-type="book"><collab>WHO</collab>. <source>International classification of functioning, disability, and health (ICF)</source>. <publisher-loc>Geneva, Switzerland</publisher-loc>: <publisher-name>World Health Organization</publisher-name> (<year>2018</year>). <comment>Available from: <ext-link ext-link-type="uri" xlink:href="https://www.who.int/standards/classifications/international-classification-of-functioning-disability-and-health/">https://www.who.int/standards/classifications/international-classification-of-functioning-disability-and-health/</ext-link></comment></citation></ref>
<ref id="B6"><label>6.</label><citation citation-type="other"><collab>WHO</collab>. <comment>Towards a common language for functioning, disability, and health: ICF. <italic>International Classification of Functioning, Disability, and Health (ICF)</italic> (2002). Available from: <ext-link ext-link-type="uri" xlink:href="https://www.who.int/publications/m/item/icf-beginner-s-guide-towards-a-common-language-for-functioning-disability-and-health">https://www.who.int/publications/m/item/icf-beginner-s-guide-towards-a-common-language-for-functioning-disability-and-health</ext-link></comment></citation></ref>
<ref id="B7"><label>7.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname><given-names>S</given-names></name><name><surname>Hsu</surname><given-names>CJ</given-names></name><name><surname>Trent</surname><given-names>L</given-names></name><name><surname>Ryan</surname><given-names>T</given-names></name><name><surname>Kearns</surname><given-names>NT</given-names></name><name><surname>Civillico</surname><given-names>EF</given-names></name></person-group>, et al. <article-title>Evaluation of performance-based outcome measures for the upper limb: a comprehensive narrative review</article-title>. <source>PM&#x0026;R</source>. (<year>2018</year>) <volume>10</volume>:<fpage>951</fpage>&#x2013;<lpage>62</lpage>. <pub-id pub-id-type="doi">10.1016/j.pmrj.2018.02.008</pub-id></citation></ref>
<ref id="B8"><label>8.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>de los Reyes-Guzm&#x00E1;n</surname><given-names>A</given-names></name><name><surname>Dimbwadyo-Terrer</surname><given-names>I</given-names></name><name><surname>Trincado-Alonso</surname><given-names>F</given-names></name><name><surname>Monasterio-Huelin</surname><given-names>F</given-names></name><name><surname>Torricelli</surname><given-names>D</given-names></name><name><surname>Gil-Agudo</surname><given-names>A</given-names></name></person-group>. <article-title>Quantitative assessment based on kinematic measures of functional impairments during upper extremity movements: a review</article-title>. <source>Clin Biomech</source>. (<year>2014</year>) <volume>29</volume>:<fpage>719</fpage>&#x2013;<lpage>27</lpage>. <pub-id pub-id-type="doi">10.1016/j.clinbiomech.2014.06.013</pub-id></citation></ref>
<ref id="B9"><label>9.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kwakkel</surname><given-names>G</given-names></name><name><surname>Van Wegen</surname><given-names>E</given-names></name><name><surname>Burridge</surname><given-names>J</given-names></name><name><surname>Winstein</surname><given-names>C</given-names></name><name><surname>Van Dokkum</surname><given-names>L</given-names></name><name><surname>Alt Murphy</surname><given-names>M</given-names></name></person-group>, et al. <article-title>Standardized measurement of quality of upper limb movement after stroke: consensus-based core recommendations from the second stroke recovery and rehabilitation roundtable</article-title>. <source>Int J Stroke</source>. (<year>2019</year>) <volume>14</volume>:<fpage>783</fpage>&#x2013;<lpage>91</lpage>. <pub-id pub-id-type="doi">10.1177/1747493019873519</pub-id><pub-id pub-id-type="pmid">31510885</pub-id></citation></ref>
<ref id="B10"><label>10.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Aggarwal</surname><given-names>R</given-names></name><name><surname>Moorthy</surname><given-names>K</given-names></name><name><surname>Darzi</surname><given-names>A</given-names></name></person-group>. <article-title>Laparoscopic skills training, assessment</article-title>. <source>Br J Surg</source>. (<year>2004</year>) <volume>91</volume>:<fpage>1549</fpage>&#x2013;<lpage>58</lpage>. <pub-id pub-id-type="doi">10.1002/bjs.4816</pub-id><pub-id pub-id-type="pmid">15547882</pub-id></citation></ref>
<ref id="B11"><label>11.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dromey</surname><given-names>BP</given-names></name><name><surname>Ahmed</surname><given-names>S</given-names></name><name><surname>Vasconcelos</surname><given-names>F</given-names></name><name><surname>Mazomenos</surname><given-names>E</given-names></name><name><surname>Kunpalin</surname><given-names>Y</given-names></name><name><surname>Ourselin</surname><given-names>S</given-names></name></person-group>, et al. <article-title>Dimensionless squared jerk&#x2013;an objective differential to assess experienced, novice probe movement in obstetric ultrasound</article-title>. <source>Prenat Diagn</source>. (<year>2020</year>) <volume>41</volume>(<issue>2</issue>):<fpage>271</fpage>&#x2013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1002/pd.5855</pub-id><pub-id pub-id-type="pmid">33103808</pub-id></citation></ref>
<ref id="B12"><label>12.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cowley</surname><given-names>J</given-names></name><name><surname>Resnik</surname><given-names>L</given-names></name><name><surname>Wilken</surname><given-names>J</given-names></name><name><surname>Smurr Walters</surname><given-names>L</given-names></name><name><surname>Gates</surname><given-names>D</given-names></name></person-group>. <article-title>Movement quality of conventional prostheses, the DEKA arm during everyday tasks</article-title>. <source>Prosthet Orthot Int</source>. (<year>2017</year>) <volume>41</volume>:<fpage>33</fpage>&#x2013;<lpage>40</lpage>. <pub-id pub-id-type="doi">10.1177/030936461663134</pub-id><pub-id pub-id-type="pmid">26932980</pub-id></citation></ref>
<ref id="B13"><label>13.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Metzger</surname><given-names>AJ</given-names></name><name><surname>Dromerick</surname><given-names>AW</given-names></name><name><surname>Holley</surname><given-names>RJ</given-names></name><name><surname>Lum</surname><given-names>PS</given-names></name></person-group>. <article-title>Characterization of compensatory trunk movements during prosthetic upper limb reaching tasks</article-title>. <source>Arch Phys Med Rehabil</source>. (<year>2012</year>) <volume>93</volume>:<fpage>2029</fpage>&#x2013;<lpage>34</lpage>. <pub-id pub-id-type="doi">10.1016/j.apmr.2012.03.011</pub-id><pub-id pub-id-type="pmid">22449551</pub-id></citation></ref>
<ref id="B14"><label>14.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Major</surname><given-names>MJ</given-names></name><name><surname>Stine</surname><given-names>RL</given-names></name><name><surname>Heckathorne</surname><given-names>CW</given-names></name><name><surname>Fatone</surname><given-names>S</given-names></name><name><surname>Gard</surname><given-names>SA</given-names></name></person-group>. <article-title>Comparison of range-of-motion and variability in upper body movements between transradial prosthesis users and able-bodied controls when executing goal-oriented tasks</article-title>. <source>J Neuroeng Rehabil</source>. (<year>2014</year>) <volume>11</volume>:<fpage>132</fpage>. <pub-id pub-id-type="doi">10.1186/1743-0003-11-132</pub-id><pub-id pub-id-type="pmid">25192744</pub-id></citation></ref>
<ref id="B15"><label>15.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Engdahl</surname><given-names>SM</given-names></name><name><surname>Gates</surname><given-names>DH</given-names></name></person-group>. <article-title>Differences in quality of movements made with body-powered, myoelectric prostheses during activities of daily living</article-title>. <source>Clin Biomech</source>. (<year>2021</year>) <volume>84</volume>:<fpage>105311</fpage>. <pub-id pub-id-type="doi">10.1016/j.clinbiomech.2021.105311</pub-id></citation></ref>
<ref id="B16"><label>16.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schwarz</surname><given-names>A</given-names></name><name><surname>Kanzler</surname><given-names>CM</given-names></name><name><surname>Lambercy</surname><given-names>O</given-names></name><name><surname>Luft</surname><given-names>AR</given-names></name><name><surname>Veerbeek</surname><given-names>JM</given-names></name></person-group>. <article-title>Systematic review on kinematic assessments of upper limb movements after stroke</article-title>. <source>Stroke</source>. (<year>2019</year>) <volume>50</volume>:<fpage>718</fpage>&#x2013;<lpage>27</lpage>. <pub-id pub-id-type="doi">10.1161/STROKEAHA.118.023531</pub-id><pub-id pub-id-type="pmid">30776997</pub-id></citation></ref>
<ref id="B17"><label>17.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Cotton</surname><given-names>RJ</given-names></name></person-group>. <comment>Kinematic tracking of rehabilitation patients with markerless pose estimation fused with wearable inertial sensors. In: <italic>2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020)(FG)</italic>. Buenos Aires, Argentina (2020). p. 588&#x2013;594</comment>. <pub-id pub-id-type="doi">10.1109/FG47880.2020.00092</pub-id></citation></ref>
<ref id="B18"><label>18.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>El-Gohary</surname><given-names>M</given-names></name><name><surname>McNames</surname><given-names>J</given-names></name></person-group>. <article-title>Shoulder, elbow joint angle tracking with inertial sensors</article-title>. <source>IEEE Trans Biomed Eng</source>. (<year>2012</year>) <volume>59</volume>:<fpage>2635</fpage>&#x2013;<lpage>41</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2012.2208750</pub-id><pub-id pub-id-type="pmid">22911538</pub-id></citation></ref>
<ref id="B19"><label>19.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname><given-names>K</given-names></name><name><surname>Song</surname><given-names>W-K</given-names></name><name><surname>Lee</surname><given-names>J</given-names></name><name><surname>Lee</surname><given-names>H-Y</given-names></name><name><surname>Park</surname><given-names>DS</given-names></name><name><surname>Ko</surname><given-names>B-W</given-names></name></person-group>, et al. <article-title>Kinematic analysis of upper extremity movement during drinking in hemiplegic subjects</article-title>. <source>Clin Biomech</source>. (<year>2014</year>) <volume>29</volume>:<fpage>248</fpage>&#x2013;<lpage>56</lpage>. <pub-id pub-id-type="doi">10.1016/j.clinbiomech.2013.12.013</pub-id></citation></ref>
<ref id="B20"><label>20.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Valevicius</surname><given-names>AM</given-names></name><name><surname>Boser</surname><given-names>QA</given-names></name><name><surname>Lavoie</surname><given-names>EB</given-names></name><name><surname>Chapman</surname><given-names>CS</given-names></name><name><surname>Pilarski</surname><given-names>PM</given-names></name><name><surname>Hebert</surname><given-names>JS</given-names></name></person-group>, et al. <article-title>Characterization of normative angular joint kinematics during two functional upper limb tasks</article-title>. <source>Gait Posture</source>. (<year>2019</year>) <volume>69</volume>:<fpage>176</fpage>&#x2013;<lpage>86</lpage>. <pub-id pub-id-type="doi">10.1016/j.gaitpost.2019.01.037</pub-id><pub-id pub-id-type="pmid">30769260</pub-id></citation></ref>
<ref id="B21"><label>21.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kontson</surname><given-names>KL</given-names></name><name><surname>Marcus</surname><given-names>IP</given-names></name><name><surname>Myklebust</surname><given-names>BM</given-names></name><name><surname>Civillico</surname><given-names>EF</given-names></name></person-group>. <article-title>An integrated movement analysis framework to study upper limb function: a pilot study</article-title>. <source>IEEE Trans Neural Syst Rehabil Eng</source>. (<year>2017</year>) <volume>25</volume>:<fpage>1874</fpage>&#x2013;<lpage>83</lpage>. <pub-id pub-id-type="doi">10.1109/TNSRE.2017.2693234</pub-id><pub-id pub-id-type="pmid">28422686</pub-id></citation></ref>
<ref id="B22"><label>22.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Valevicius</surname><given-names>AM</given-names></name><name><surname>Jun</surname><given-names>PY</given-names></name><name><surname>Hebert</surname><given-names>JS</given-names></name><name><surname>Vette</surname><given-names>AH</given-names></name></person-group>. <article-title>Use of optical motion capture for the analysis of normative upper body kinematics during functional upper limb tasks: a systematic review</article-title>. <source>J Electromyogr Kinesiol</source>. (<year>2018</year>) <volume>40</volume>:<fpage>1</fpage>&#x2013;<lpage>15</lpage>. <pub-id pub-id-type="doi">10.1016/j.jelekin.2018.02.011</pub-id><pub-id pub-id-type="pmid">29533202</pub-id></citation></ref>
<ref id="B23"><label>23.</label><citation citation-type="book"><person-group person-group-type="author"><name><surname>Winter</surname><given-names>DA</given-names></name></person-group>. <source>Biomechanics and motor control of human movement</source>. <edition>4th ed</edition>. <publisher-loc>Hoboken (NJ)</publisher-loc>: <publisher-name>John Wiley &#x0026; Sons</publisher-name> (<year>2009</year>). p. <fpage>1</fpage>&#x2013;<lpage>13</lpage>, <comment>chap. 1</comment>.</citation></ref>
<ref id="B24"><label>24.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Robert-Lachaine</surname><given-names>X</given-names></name><name><surname>Mecheri</surname><given-names>H</given-names></name><name><surname>Muller</surname><given-names>A</given-names></name><name><surname>Larue</surname><given-names>C</given-names></name><name><surname>Plamondon</surname><given-names>A</given-names></name></person-group>. <article-title>Validation of a low-cost inertial motion capture system for whole-body motion analysis</article-title>. <source>J Biomech</source>. (<year>2020</year>) <volume>99</volume>:<fpage>109520</fpage>. <pub-id pub-id-type="doi">10.1016/j.jbiomech.2019.109520</pub-id><pub-id pub-id-type="pmid">31787261</pub-id></citation></ref>
<ref id="B25"><label>25.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Filippeschi</surname><given-names>A</given-names></name><name><surname>Schmitz</surname><given-names>N</given-names></name><name><surname>Miezal</surname><given-names>M</given-names></name><name><surname>Bleser</surname><given-names>G</given-names></name><name><surname>Ruffaldi</surname><given-names>E</given-names></name><name><surname>Stricker</surname><given-names>D</given-names></name></person-group>. <article-title>Survey of motion tracking methods based on inertial sensors: a focus on upper limb human motion</article-title>. <source>Sensors</source>. (<year>2017</year>) <volume>17</volume>:<fpage>1257</fpage>. <pub-id pub-id-type="doi">10.3390/s17061257</pub-id><pub-id pub-id-type="pmid">28587178</pub-id></citation></ref>
<ref id="B26"><label>26.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Jackson</surname><given-names>K</given-names></name><name><surname>Duri&#x0107;</surname><given-names>Z</given-names></name><name><surname>Engdahl</surname><given-names>S</given-names></name><name><surname>Gerber</surname><given-names>L</given-names></name></person-group>. <comment>Characterizing functional upper extremity movement in haptic virtual environments. In: <italic>2020 42nd Annual International Conference of the IEEE Engineering in Medicine &#x0026; Biology Society (EMBC)</italic>. IEEE (2020). p. 3166&#x2013;3169</comment>. <pub-id pub-id-type="doi">10.1109/EMBC44109.2020.9176492</pub-id></citation></ref>
<ref id="B27"><label>27.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gavrila</surname><given-names>DM</given-names></name></person-group>. <article-title>The visual analysis of human movement: a survey</article-title>. <source>Comput Vis Image Underst</source>. (<year>1999</year>) <volume>73</volume>:<fpage>82</fpage>&#x2013;<lpage>98</lpage>. <pub-id pub-id-type="doi">10.1006/cviu.1998.0716</pub-id></citation></ref>
<ref id="B28"><label>28.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cao</surname><given-names>Z</given-names></name><name><surname>Martinez</surname><given-names>GH</given-names></name><name><surname>Simon</surname><given-names>T</given-names></name><name><surname>Wei</surname><given-names>S-E</given-names></name><name><surname>Sheikh</surname><given-names>YA</given-names></name></person-group>. <article-title>Openpose: realtime multi-person 2D pose estimation using part affinity fields</article-title>. <source>IEEE Trans Pattern Anal Mach Intell</source>. (<year>2019</year>) <volume>43</volume>(<issue>1</issue>):<fpage>172</fpage>&#x2013;<lpage>86</lpage>. <pub-id pub-id-type="doi">10.1109/TPAMI.2019.2929257</pub-id></citation></ref>
<ref id="B29"><label>29.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Kanazawa</surname><given-names>A</given-names></name><name><surname>Black</surname><given-names>MJ</given-names></name><name><surname>Jacobs</surname><given-names>DW</given-names></name><name><surname>Malik</surname><given-names>J</given-names></name></person-group>. <comment>End-to-end recovery of human shape and pose. In: <italic>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition</italic>. Salt Lake City, Utah (2018). p. 7122&#x2013;7131</comment>.</citation></ref>
<ref id="B30"><label>30.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Zheng</surname><given-names>C</given-names></name><name><surname>Wu</surname><given-names>W</given-names></name><name><surname>Yang</surname><given-names>T</given-names></name><name><surname>Zhu</surname><given-names>S</given-names></name><name><surname>Chen</surname><given-names>C</given-names></name><name><surname>Liu</surname><given-names>R</given-names></name></person-group>, et al. <comment>Deep learning-based human pose estimation: a survey [Preprint] (2020). Available at:</comment> <pub-id pub-id-type="doi">10.48550/arXiv.2012.13392</pub-id></citation></ref>
<ref id="B31"><label>31.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Moeslund</surname><given-names>TB</given-names></name><name><surname>Hilton</surname><given-names>A</given-names></name><name><surname>Kr&#x00FC;ger</surname><given-names>V</given-names></name></person-group>. <article-title>A survey of advances in vision-based human motion capture and analysis</article-title>. <source>Comput Vis Image Underst</source>. (<year>2006</year>) <volume>104</volume>:<fpage>90</fpage>&#x2013;<lpage>126</lpage>. <pub-id pub-id-type="doi">10.1016/j.cviu.2006.08.002</pub-id></citation></ref>
<ref id="B32"><label>32.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Turaga</surname><given-names>P</given-names></name><name><surname>Chellappa</surname><given-names>R</given-names></name><name><surname>Subrahmanian</surname><given-names>VS</given-names></name><name><surname>Udrea</surname><given-names>O</given-names></name></person-group>. <article-title>Machine recognition of human activities: a survey</article-title>. <source>IEEE Trans Circuits Syst Video Technol</source>. (<year>2008</year>) <volume>18</volume>:<fpage>1473</fpage>&#x2013;<lpage>88</lpage>. <pub-id pub-id-type="doi">10.1109/TCSVT.2008.2005594</pub-id></citation></ref>
<ref id="B33"><label>33.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Poppe</surname><given-names>R</given-names></name></person-group>. <article-title>A survey on vision-based human action recognition</article-title>. <source>Image Vis Comput</source>. (<year>2010</year>) <volume>28</volume>:<fpage>976</fpage>&#x2013;<lpage>90</lpage>. <pub-id pub-id-type="doi">10.1016/j.imavis.2009.11.014</pub-id></citation></ref>
<ref id="B34"><label>34.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Herath</surname><given-names>S</given-names></name><name><surname>Harandi</surname><given-names>M</given-names></name><name><surname>Porikli</surname><given-names>F</given-names></name></person-group>. <article-title>Going deeper into action recognition: a survey</article-title>. <source>Image Vis Comput</source>. (<year>2017</year>) <volume>60</volume>:<fpage>4</fpage>&#x2013;<lpage>21</lpage>. <pub-id pub-id-type="doi">10.1016/j.imavis.2017.01.010</pub-id></citation></ref>
<ref id="B35"><label>35.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lara</surname><given-names>OD</given-names></name><name><surname>Labrador</surname><given-names>MA</given-names></name></person-group>. <article-title>A survey on human activity recognition using wearable sensors</article-title>. <source>IEEE Commun Surv Tutor</source>. (<year>2012</year>) <volume>15</volume>:<fpage>1192</fpage>&#x2013;<lpage>209</lpage>. <pub-id pub-id-type="doi">10.1109/SURV.2012.110112.00192</pub-id></citation></ref>
<ref id="B36"><label>36.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname><given-names>J</given-names></name><name><surname>Chen</surname><given-names>Y</given-names></name><name><surname>Hao</surname><given-names>S</given-names></name><name><surname>Peng</surname><given-names>X</given-names></name><name><surname>Hu</surname><given-names>L</given-names></name></person-group>. <article-title>Deep learning for sensor-based activity recognition: a survey</article-title>. <source>Pattern Recognit Lett</source>. (<year>2019</year>) <volume>119</volume>:<fpage>3</fpage>&#x2013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1016/j.patrec.2018.02.010</pub-id></citation></ref>
<ref id="B37"><label>37.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Carreira</surname><given-names>J</given-names></name><name><surname>Zisserman</surname><given-names>A</given-names></name></person-group>. <comment>Quo vadis, action recognition? A new model and the kinetics dataset. In: <italic>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition</italic>. Honolulu, Hawaii (2017). p. 6299&#x2013;6308</comment>.</citation></ref>
<ref id="B38"><label>38.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Seethapathi</surname><given-names>N</given-names></name><name><surname>Wang</surname><given-names>S</given-names></name><name><surname>Saluja</surname><given-names>R</given-names></name><name><surname>Blohm</surname><given-names>G</given-names></name><name><surname>Kording</surname><given-names>KP</given-names></name></person-group>. <comment>Movement science needs different pose tracking algorithms [Preprint] (2019). Available at: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.48550/arXiv.1907.10226">https://doi.org/10.48550/arXiv.1907.10226</ext-link></comment></citation></ref>
<ref id="B39"><label>39.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Angus</surname><given-names>DC</given-names></name></person-group>. <article-title>Randomized clinical trials of artificial intelligence</article-title>. <source>JAMA</source>. (<year>2020</year>) <volume>323</volume>:<fpage>1043</fpage>&#x2013;<lpage>5</lpage>. <pub-id pub-id-type="doi">10.1001/jama.2020.1039</pub-id><pub-id pub-id-type="pmid">32065828</pub-id></citation></ref>
<ref id="B40"><label>40.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liao</surname><given-names>Y</given-names></name><name><surname>Vakanski</surname><given-names>A</given-names></name><name><surname>Xian</surname><given-names>M</given-names></name><name><surname>Paul</surname><given-names>D</given-names></name><name><surname>Baker</surname><given-names>R</given-names></name></person-group>. <article-title>A review of computational approaches for evaluation of rehabilitation exercises</article-title>. <source>Comput Biol Med</source>. (<year>2020</year>) <volume>119</volume>:<fpage>103687</fpage>. <pub-id pub-id-type="doi">10.1016/j.compbiomed.2020.103687</pub-id><pub-id pub-id-type="pmid">32339122</pub-id></citation></ref>
<ref id="B41"><label>41.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Simba&#x00F1;a</surname><given-names>EDO</given-names></name><name><surname>Baeza</surname><given-names>PS-H</given-names></name><name><surname>Huete</surname><given-names>AJ</given-names></name><name><surname>Balaguer</surname><given-names>C</given-names></name></person-group>. <article-title>Review of automated systems for upper limbs functional assessment in neurorehabilitation</article-title>. <source>IEEE Access</source>. (<year>2019</year>) <volume>7</volume>:<fpage>32352</fpage>&#x2013;<lpage>67</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2019.2901814</pub-id></citation></ref>
<ref id="B42"><label>42.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Impedovo</surname><given-names>D</given-names></name><name><surname>Pirlo</surname><given-names>G</given-names></name></person-group>. <article-title>Dynamic handwriting analysis for the assessment of neurodegenerative diseases: a pattern recognition perspective</article-title>. <source>IEEE Rev Biomed Eng</source>. (<year>2018</year>) <volume>12</volume>:<fpage>209</fpage>&#x2013;<lpage>20</lpage>. <pub-id pub-id-type="doi">10.1109/RBME.2018.2840679</pub-id><pub-id pub-id-type="pmid">29993722</pub-id></citation></ref>
<ref id="B43"><label>43.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Knippenberg</surname><given-names>E</given-names></name><name><surname>Verbrugghe</surname><given-names>J</given-names></name><name><surname>Lamers</surname><given-names>I</given-names></name><name><surname>Palmaers</surname><given-names>S</given-names></name><name><surname>Timmermans</surname><given-names>A</given-names></name><name><surname>Spooren</surname><given-names>A</given-names></name></person-group>. <article-title>Markerless motion capture systems as training device in neurological rehabilitation: a systematic review of their use, application, target population and efficacy</article-title>. <source>J Neuroeng Rehabil</source>. (<year>2017</year>) <volume>14</volume>:<fpage>1</fpage>&#x2013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1186/s12984-017-0270-x</pub-id><pub-id pub-id-type="pmid">28057016</pub-id></citation></ref>
<ref id="B44"><label>44.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Alarc&#x00F3;n-Aldana</surname><given-names>AC</given-names></name><name><surname>Callejas-Cuervo</surname><given-names>M</given-names></name><name><surname>Bo</surname><given-names>APL</given-names></name></person-group>. <article-title>Upper limb physical rehabilitation using serious videogames and motion capture systems: a systematic review</article-title>. <source>Sensors</source>. (<year>2020</year>) <volume>20</volume>:<fpage>5989</fpage>. <pub-id pub-id-type="doi">10.3390/s20215989</pub-id></citation></ref>
<ref id="B45"><label>45.</label><citation citation-type="book"><person-group person-group-type="author"><name><surname>Bartlett</surname><given-names>R</given-names></name></person-group>. <source>Introduction to sports biomechanics: analysing human movement patterns</source>. <edition>3 ed</edition>. <publisher-loc>New York, New York</publisher-loc>: <publisher-name>Routledge</publisher-name> (<year>2014</year>).</citation></ref>
<ref id="B46"><label>46.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Benalc&#x00E1;zar</surname><given-names>ME</given-names></name><name><surname>Motoche</surname><given-names>C</given-names></name><name><surname>Zea</surname><given-names>JA</given-names></name><name><surname>Jaramillo</surname><given-names>AG</given-names></name><name><surname>Anchundia</surname><given-names>CE</given-names></name><name><surname>Zambrano</surname><given-names>P</given-names></name></person-group>, et al. <comment>Real-time hand gesture recognition using the Myo armband and muscle activity detection. In: <italic>2017 IEEE Second Ecuador Technical Chapters Meeting (ETCM)</italic>. IEEE (2017). p. 1&#x2013;6</comment>. <pub-id pub-id-type="doi">10.1109/ETCM.2017.8247458</pub-id></citation></ref>
<ref id="B47"><label>47.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cheok</surname><given-names>MJ</given-names></name><name><surname>Omar</surname><given-names>Z</given-names></name><name><surname>Jaward</surname><given-names>MH</given-names></name></person-group>. <article-title>A review of hand gesture and sign language recognition techniques</article-title>. <source>Int J Mach Learn Cybern</source>. (<year>2019</year>) <volume>10</volume>:<fpage>131</fpage>&#x2013;<lpage>53</lpage>. <pub-id pub-id-type="doi">10.1007/s13042-017-0705-5</pub-id></citation></ref>
<ref id="B48"><label>48.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lin</surname><given-names>JF-S</given-names></name><name><surname>Karg</surname><given-names>M</given-names></name><name><surname>Kuli&#x0107;</surname><given-names>D</given-names></name></person-group>. <article-title>Movement primitive segmentation for human motion modeling: a framework for analysis</article-title>. <source>IEEE Trans Human Mach Syst</source>. (<year>2016</year>) <volume>46</volume>:<fpage>325</fpage>&#x2013;<lpage>39</lpage>. <pub-id pub-id-type="doi">10.1109/THMS.2015.2493536</pub-id></citation></ref>
<ref id="B49"><label>49.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Barth</surname><given-names>J</given-names></name><name><surname>Lohse</surname><given-names>KR</given-names></name><name><surname>Konrad</surname><given-names>JD</given-names></name><name><surname>Bland</surname><given-names>MD</given-names></name><name><surname>Lang</surname><given-names>CE</given-names></name></person-group>. <article-title>Sensor-based categorization of upper limb performance in daily life of persons with and without neurological upper limb deficits</article-title>. <source>Front Rehabil Sci</source>. (<year>2021</year>) <volume>2</volume>:<fpage>741393</fpage>. <pub-id pub-id-type="doi">10.3389/fresc.2021.741393</pub-id><pub-id pub-id-type="pmid">35382114</pub-id></citation></ref>
<ref id="B50"><label>50.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Poitras</surname><given-names>I</given-names></name><name><surname>Dupuis</surname><given-names>F</given-names></name><name><surname>Bielmann</surname><given-names>M</given-names></name><name><surname>Campeau-Lecours</surname><given-names>A</given-names></name><name><surname>Mercier</surname><given-names>C</given-names></name><name><surname>Bouyer</surname><given-names>LJ</given-names></name></person-group>, et al. <article-title>Validity and reliability of wearable sensors for joint angle estimation: a systematic review</article-title>. <source>Sensors</source>. (<year>2019</year>) <volume>19</volume>:<fpage>1555</fpage>. <pub-id pub-id-type="doi">10.3390/s19071555</pub-id><pub-id pub-id-type="pmid">30935116</pub-id></citation></ref>
<ref id="B51"><label>51.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Oropesa</surname><given-names>I</given-names></name><name><surname>S&#x00E1;nchez-Gonz&#x00E1;lez</surname><given-names>P</given-names></name><name><surname>Chmarra</surname><given-names>MK</given-names></name><name><surname>Lamata</surname><given-names>P</given-names></name><name><surname>Fern&#x00E1;ndez</surname><given-names>A</given-names></name><name><surname>S&#x00E1;nchez-Margallo</surname><given-names>JA</given-names></name></person-group>, et al. <article-title>EVA: laparoscopic instrument tracking based on endoscopic video analysis for psychomotor skills assessment</article-title>. <source>Surg Endosc</source>. (<year>2013</year>) <volume>27</volume>:<fpage>1029</fpage>&#x2013;<lpage>39</lpage>. <pub-id pub-id-type="doi">10.1007/s00464-012-2513-z</pub-id><pub-id pub-id-type="pmid">23052495</pub-id></citation></ref>
<ref id="B52"><label>52.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>P&#x00E9;rez-Escamirosa</surname><given-names>F</given-names></name><name><surname>Alarc&#x00F3;n-Paredes</surname><given-names>A</given-names></name><name><surname>Alonso-Silverio</surname><given-names>GA</given-names></name><name><surname>Oropesa</surname><given-names>I</given-names></name><name><surname>Camacho-Nieto</surname><given-names>O</given-names></name><name><surname>Lorias-Espinoza</surname><given-names>D</given-names></name></person-group>, et al. <article-title>Objective classification of psychomotor laparoscopic skills of surgeons based on three different approaches</article-title>. <source>Int J Comput Assist Radiol Surg</source>. (<year>2020</year>) <volume>15</volume>:<fpage>27</fpage>&#x2013;<lpage>40</lpage>. <pub-id pub-id-type="doi">10.1007/s11548-019-02073-2</pub-id></citation></ref>
<ref id="B53"><label>53.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bourke</surname><given-names>TC</given-names></name><name><surname>Coderre</surname><given-names>AM</given-names></name><name><surname>Bagg</surname><given-names>SD</given-names></name><name><surname>Dukelow</surname><given-names>SP</given-names></name><name><surname>Norman</surname><given-names>KE</given-names></name><name><surname>Scott</surname><given-names>SH</given-names></name></person-group>. <article-title>Impaired corrective responses to postural perturbations of the arm in individuals with subacute stroke</article-title>. <source>J Neuroeng Rehabil</source>. (<year>2015</year>) <volume>12</volume>:<fpage>1</fpage>&#x2013;<lpage>15</lpage>. <pub-id pub-id-type="doi">10.1186/1743-0003-12-7</pub-id><pub-id pub-id-type="pmid">25557982</pub-id></citation></ref>
<ref id="B54"><label>54.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kontson</surname><given-names>K</given-names></name><name><surname>Marcus</surname><given-names>I</given-names></name><name><surname>Myklebust</surname><given-names>B</given-names></name><name><surname>Civillico</surname><given-names>E</given-names></name></person-group>. <article-title>Targeted box and blocks test: normative data and comparison to standard tests</article-title>. <source>PLoS ONE</source>. (<year>2017</year>) <volume>12</volume>:<fpage>e0177965</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0177965</pub-id><pub-id pub-id-type="pmid">28542374</pub-id></citation></ref>
<ref id="B55"><label>55.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Topley</surname><given-names>M</given-names></name><name><surname>Richards</surname><given-names>JG</given-names></name></person-group>. <article-title>A comparison of currently available optoelectronic motion capture systems</article-title>. <source>J Biomech</source>. (<year>2020</year>) <volume>106</volume>:<fpage>109820</fpage>. <pub-id pub-id-type="doi">10.1016/j.jbiomech.2020.109820</pub-id><pub-id pub-id-type="pmid">32517978</pub-id></citation></ref>
<ref id="B56"><label>56.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Parks</surname><given-names>MT</given-names></name><name><surname>Wang</surname><given-names>Z</given-names></name><name><surname>Siu</surname><given-names>K-C</given-names></name></person-group>. <article-title>Current low-cost video-based motion analysis options for clinical rehabilitation: a systematic review</article-title>. <source>Phys Ther</source>. (<year>2019</year>) <volume>99</volume>:<fpage>1405</fpage>&#x2013;<lpage>25</lpage>. <pub-id pub-id-type="doi">10.1093/ptj/pzz097</pub-id><pub-id pub-id-type="pmid">31309974</pub-id></citation></ref>
<ref id="B57"><label>57.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname><given-names>Y</given-names></name><name><surname>Tian</surname><given-names>Y</given-names></name><name><surname>He</surname><given-names>M</given-names></name></person-group>. <article-title>Monocular human pose estimation: a survey of deep learning-based methods</article-title>. <source>Comput Vis Image Underst</source>. (<year>2020</year>) <volume>192</volume>:<fpage>102897</fpage>. <pub-id pub-id-type="doi">10.1016/j.cviu.2019.102897</pub-id></citation></ref>
<ref id="B58"><label>58.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sahin</surname><given-names>I</given-names></name><name><surname>Modi</surname><given-names>A</given-names></name><name><surname>Kokkoni</surname><given-names>E</given-names></name></person-group>. <article-title>Evaluation of openpose for quantifying infant reaching motion</article-title>. <source>Arch Phys Med Rehabil</source>. (<year>2021</year>) <volume>102</volume>:<fpage>e86</fpage>. <pub-id pub-id-type="doi">10.1016/j.apmr.2021.07.728</pub-id></citation></ref>
<ref id="B59"><label>59.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ivorra</surname><given-names>E</given-names></name><name><surname>Ortega P&#x00E9;rez</surname><given-names>M</given-names></name><name><surname>Alca&#x00F1;iz Raya</surname><given-names>ML</given-names></name></person-group>. <article-title>Azure kinect body tracking under review for the specific case of upper limb exercises</article-title>. <source>MM Sci J (Online)</source>. (<year>2021</year>) <volume>2021</volume>:<fpage>4333</fpage>&#x2013;<lpage>41</lpage>. <pub-id pub-id-type="doi">10.17973/MMSJ.2021-6-2021012</pub-id></citation></ref>
<ref id="B60"><label>60.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname><given-names>R</given-names></name><name><surname>Liu</surname><given-names>Z</given-names></name><name><surname>Tan</surname><given-names>J</given-names></name></person-group>. <article-title>A survey on 3D hand pose estimation: cameras, methods, and datasets</article-title>. <source>Pattern Recognit</source>. (<year>2019</year>) <volume>93</volume>:<fpage>251</fpage>&#x2013;<lpage>72</lpage>. <pub-id pub-id-type="doi">10.1016/j.patcog.2019.04.026</pub-id></citation></ref>
<ref id="B61"><label>61.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kanko</surname><given-names>RM</given-names></name><name><surname>Laende</surname><given-names>EK</given-names></name><name><surname>Strutzenberger</surname><given-names>G</given-names></name><name><surname>Brown</surname><given-names>M</given-names></name><name><surname>Selbie</surname><given-names>WS</given-names></name><name><surname>DePaul</surname><given-names>V</given-names></name></person-group>, et al. <article-title>Assessment of spatiotemporal gait parameters using a deep learning algorithm-based markerless motion capture system</article-title>. <source>J Biomech</source>. (<year>2021</year>) <volume>122</volume>:<fpage>110414</fpage>. <pub-id pub-id-type="doi">10.1016/j.jbiomech.2021.110414</pub-id><pub-id pub-id-type="pmid">33915475</pub-id></citation></ref>
<ref id="B62"><label>62.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Uhlrich</surname><given-names>SD</given-names></name><name><surname>Falisse</surname><given-names>A</given-names></name><name><surname>Kidzi&#x0144;ski</surname><given-names>&#x0141;</given-names></name><name><surname>Muccini</surname><given-names>J</given-names></name><name><surname>Ko</surname><given-names>M</given-names></name><name><surname>Chaudhari</surname><given-names>AS</given-names></name></person-group>, et al. <comment>OpenCap: 3D human movement dynamics from smartphone videos. bioRxiv</comment>. <pub-id pub-id-type="doi">10.1101/2022.07.07.499061</pub-id></citation></ref>
<ref id="B63"><label>63.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nakano</surname><given-names>N</given-names></name><name><surname>Sakura</surname><given-names>T</given-names></name><name><surname>Ueda</surname><given-names>K</given-names></name><name><surname>Omura</surname><given-names>L</given-names></name><name><surname>Kimura</surname><given-names>A</given-names></name><name><surname>Iino</surname><given-names>Y</given-names></name></person-group>, et al. <article-title>Evaluation of 3D markerless motion capture accuracy using openpose with multiple video cameras</article-title>. <source>Front Sports Active Living</source>. (<year>2020</year>) <volume>2</volume>:<fpage>50</fpage>. <pub-id pub-id-type="doi">10.3389/fspor.2020.00050</pub-id></citation></ref>
<ref id="B64"><label>64.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Needham</surname><given-names>L</given-names></name><name><surname>Evans</surname><given-names>M</given-names></name><name><surname>Cosker</surname><given-names>DP</given-names></name><name><surname>Wade</surname><given-names>L</given-names></name><name><surname>McGuigan</surname><given-names>PM</given-names></name><name><surname>Bilzon</surname><given-names>JL</given-names></name></person-group>, et al. <article-title>The accuracy of several pose estimation methods for 3D joint centre localisation</article-title>. <source>Sci Rep</source>. (<year>2021</year>) <volume>11</volume>(<issue>1</issue>). <pub-id pub-id-type="doi">10.1038/s41598-021-00212-x</pub-id><pub-id pub-id-type="pmid">34667207</pub-id></citation></ref>
<ref id="B65"><label>65.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Webster</surname><given-names>D</given-names></name><name><surname>Celik</surname><given-names>O</given-names></name></person-group>. <comment>Experimental evaluation of microsoft kinect&#x2019;s accuracy, capture rate for stroke rehabilitation applications. In: <italic>2014 IEEE Haptics Symposium (HAPTICS)</italic>. IEEE (2014). p. 455&#x2013;460</comment>. <pub-id pub-id-type="doi">10.1109/HAPTICS.2014.6775498</pub-id></citation></ref>
<ref id="B66"><label>66.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname><given-names>W-S</given-names></name><name><surname>Cho</surname><given-names>S</given-names></name><name><surname>Baek</surname><given-names>D</given-names></name><name><surname>Bang</surname><given-names>H</given-names></name><name><surname>Paik</surname><given-names>N-J</given-names></name></person-group>. <article-title>Upper extremity functional evaluation by Fugl-Meyer assessment scoring using depth-sensing camera in hemiplegic stroke patients</article-title>. <source>PLoS ONE</source>. (<year>2016</year>) <volume>11</volume>:<fpage>e0158640</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0158640</pub-id><pub-id pub-id-type="pmid">27367518</pub-id></citation></ref>
<ref id="B67"><label>67.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Napoli</surname><given-names>A</given-names></name><name><surname>Glass</surname><given-names>S</given-names></name><name><surname>Ward</surname><given-names>C</given-names></name><name><surname>Tucker</surname><given-names>C</given-names></name><name><surname>Obeid</surname><given-names>I</given-names></name></person-group>. <article-title>Performance analysis of a generalized motion capture system using microsoft kinect 2.0</article-title>. <source>Biomed Signal Process Control</source>. (<year>2017</year>) <volume>38</volume>:<fpage>265</fpage>&#x2013;<lpage>80</lpage>. <pub-id pub-id-type="doi">10.1016/j.bspc.2017.06.006</pub-id></citation></ref>
<ref id="B68"><label>68.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Reither</surname><given-names>LR</given-names></name><name><surname>Foreman</surname><given-names>MH</given-names></name><name><surname>Migotsky</surname><given-names>N</given-names></name><name><surname>Haddix</surname><given-names>C</given-names></name><name><surname>Engsberg</surname><given-names>JR</given-names></name></person-group>. <article-title>Upper extremity movement reliability and validity of the kinect version 2</article-title>. <source>Disabil Rehabil Assist Technol</source>. (<year>2018</year>) <volume>13</volume>:<fpage>54</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1080/17483107.2016.1278473</pub-id><pub-id pub-id-type="pmid">28102090</pub-id></citation></ref>
<ref id="B69"><label>69.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sarsfield</surname><given-names>J</given-names></name><name><surname>Brown</surname><given-names>D</given-names></name><name><surname>Sherkat</surname><given-names>N</given-names></name><name><surname>Langensiepen</surname><given-names>C</given-names></name><name><surname>Lewis</surname><given-names>J</given-names></name><name><surname>Taheri</surname><given-names>M</given-names></name></person-group>, et al. <article-title>Clinical assessment of depth sensor based pose estimation algorithms for technology supervised rehabilitation applications</article-title>. <source>Int J Med Inform</source>. (<year>2019</year>) <volume>121</volume>:<fpage>30</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1016/j.ijmedinf.2018.11.001</pub-id><pub-id pub-id-type="pmid">30545487</pub-id></citation></ref>
<ref id="B70"><label>70.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Scano</surname><given-names>A</given-names></name><name><surname>Mira</surname><given-names>RM</given-names></name><name><surname>Cerveri</surname><given-names>P</given-names></name><name><surname>Molinari Tosatti</surname><given-names>L</given-names></name><name><surname>Sacco</surname><given-names>M</given-names></name></person-group>. <article-title>Analysis of upper-limb and trunk kinematic variability: accuracy and reliability of an RGB-D sensor</article-title>. <source>Multimodal Technol Interact</source>. (<year>2020</year>) <volume>4</volume>:<fpage>14</fpage>. <pub-id pub-id-type="doi">10.3390/mti4020014</pub-id></citation></ref>
<ref id="B71"><label>71.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname><given-names>YM</given-names></name><name><surname>Lee</surname><given-names>S</given-names></name><name><surname>Uhm</surname><given-names>KE</given-names></name><name><surname>Kurillo</surname><given-names>G</given-names></name><name><surname>Han</surname><given-names>JJ</given-names></name><name><surname>Lee</surname><given-names>J</given-names></name></person-group>. <article-title>Upper limb three-dimensional reachable workspace analysis using the Kinect sensor in hemiplegic stroke patients: a cross-sectional observational study</article-title>. <source>Am J Phys Med Rehabil</source>. (<year>2020</year>) <volume>99</volume>:<fpage>397</fpage>&#x2013;<lpage>403</lpage>. <pub-id pub-id-type="doi">10.1097/PHM.0000000000001350</pub-id><pub-id pub-id-type="pmid">31725017</pub-id></citation></ref>
<ref id="B72"><label>72.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Carnevale</surname><given-names>A</given-names></name><name><surname>Longo</surname><given-names>UG</given-names></name><name><surname>Schena</surname><given-names>E</given-names></name><name><surname>Massaroni</surname><given-names>C</given-names></name><name><surname>Lo Presti</surname><given-names>D</given-names></name><name><surname>Berton</surname><given-names>A</given-names></name></person-group>, et al. <article-title>Wearable systems for shoulder kinematics assessment: a systematic review</article-title>. <source>BMC Musculoskelet Disord</source>. (<year>2019</year>) <volume>20</volume>(<issue>1</issue>). <pub-id pub-id-type="doi">10.1186/s12891-019-2930-4</pub-id><pub-id pub-id-type="pmid">31731893</pub-id></citation></ref>
<ref id="B73"><label>73.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Baldi</surname><given-names>TL</given-names></name><name><surname>Farina</surname><given-names>F</given-names></name><name><surname>Garulli</surname><given-names>A</given-names></name><name><surname>Giannitrapani</surname><given-names>A</given-names></name><name><surname>Prattichizzo</surname><given-names>D</given-names></name></person-group>. <article-title>Upper body pose estimation using wearable inertial sensors and multiplicative Kalman filter</article-title>. <source>IEEE Sens J</source>. (<year>2019</year>) <volume>20</volume>:<fpage>492</fpage>&#x2013;<lpage>500</lpage>. <pub-id pub-id-type="doi">10.1109/JSEN.2019.2940612</pub-id></citation></ref>
<ref id="B74"><label>74.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nie</surname><given-names>JZ</given-names></name><name><surname>Nie</surname><given-names>JW</given-names></name><name><surname>Hung</surname><given-names>N-T</given-names></name><name><surname>Cotton</surname><given-names>RJ</given-names></name><name><surname>Slutzky</surname><given-names>MW</given-names></name></person-group>. <article-title>Portable, open-source solutions for estimating wrist position during reaching in people with stroke</article-title>. <source>Sci Rep</source>. (<year>2021</year>) <volume>11</volume>(<issue>1</issue>). <pub-id pub-id-type="doi">10.1038/s41598-021-01805-2</pub-id></citation></ref>
<ref id="B75"><label>75.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Feng</surname><given-names>Y</given-names></name><name><surname>Choutas</surname><given-names>V</given-names></name><name><surname>Bolkart</surname><given-names>T</given-names></name><name><surname>Tzionas</surname><given-names>D</given-names></name><name><surname>Black</surname><given-names>MJ</given-names></name></person-group>. <comment>Collaborative regression of expressive bodies using moderation. In: <italic>2021 International Conference on 3D Vision (3DV)</italic>. IEEE (2021). p. 792&#x2013;804</comment>. <pub-id pub-id-type="doi">10.1109/3DV53792.2021.00088</pub-id></citation></ref>
<ref id="B76"><label>76.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Choutas</surname><given-names>V</given-names></name><name><surname>Pavlakos</surname><given-names>G</given-names></name><name><surname>Bolkart</surname><given-names>T</given-names></name><name><surname>Tzionas</surname><given-names>D</given-names></name><name><surname>Black</surname><given-names>MJ</given-names></name></person-group>. <comment>Monocular expressive body regression through body-driven attention. In: <italic>European Conference on Computer Vision</italic>. Springer (2020). p. 20&#x2013;40</comment>. <pub-id pub-id-type="doi">10.1007/978-3-030-58607-2_2</pub-id></citation></ref>
<ref id="B77"><label>77.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Rempe</surname><given-names>D</given-names></name><name><surname>Birdal</surname><given-names>T</given-names></name><name><surname>Hertzmann</surname><given-names>A</given-names></name><name><surname>Yang</surname><given-names>J</given-names></name><name><surname>Sridhar</surname><given-names>S</given-names></name><name><surname>Guibas</surname><given-names>LJ</given-names></name></person-group>. <comment>HuMoR: 3D human motion model for robust pose estimation. In: <italic>Proceedings of the IEEE/CVF International Conference on Computer Vision</italic>. Montreal, QC, Canada (2021). p. 11488&#x2013;11499</comment>.</citation></ref>
<ref id="B78"><label>78.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Jatesiktat</surname><given-names>P</given-names></name><name><surname>Anopas</surname><given-names>D</given-names></name><name><surname>Ang</surname><given-names>WT</given-names></name></person-group>. <comment>Personalized markerless upper-body tracking with a depth camera and wrist-worn inertial measurement units. In: <italic>2018 40th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)</italic>. IEEE (2018). p. 1&#x2013;6</comment>. <pub-id pub-id-type="doi">10.1109/EMBC.2018.8513068</pub-id></citation></ref>
<ref id="B79"><label>79.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kidzi&#x0144;ski</surname><given-names>&#x0141;</given-names></name><name><surname>Yang</surname><given-names>B</given-names></name><name><surname>Hicks</surname><given-names>JL</given-names></name><name><surname>Rajagopal</surname><given-names>A</given-names></name><name><surname>Delp</surname><given-names>SL</given-names></name><name><surname>Schwartz</surname><given-names>MH</given-names></name></person-group>. <article-title>Deep neural networks enable quantitative movement analysis using single-camera videos</article-title>. <source>Nat Commun</source>. (<year>2020</year>) <volume>11</volume>:<fpage>1</fpage>&#x2013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1038/s41467-020-17807-z</pub-id></citation></ref>
<ref id="B80"><label>80.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cronin</surname><given-names>NJ</given-names></name></person-group>. <article-title>Using deep neural networks for kinematic analysis: challenges and opportunities</article-title>. <source>J Biomech</source>. (<year>2021</year>) <volume>123</volume>:<fpage>110460</fpage>. <pub-id pub-id-type="doi">10.1016/j.jbiomech.2021.110460</pub-id><pub-id pub-id-type="pmid">34029787</pub-id></citation></ref>
<ref id="B81"><label>81.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shotton</surname><given-names>J</given-names></name><name><surname>Girshick</surname><given-names>R</given-names></name><name><surname>Fitzgibbon</surname><given-names>A</given-names></name><name><surname>Sharp</surname><given-names>T</given-names></name><name><surname>Cook</surname><given-names>M</given-names></name><name><surname>Finocchio</surname><given-names>M</given-names></name></person-group>, et al. <article-title>Efficient human pose estimation from single depth images</article-title>. <source>IEEE Trans Pattern Anal Mach Intell</source>. (<year>2012</year>) <volume>35</volume>:<fpage>2821</fpage>&#x2013;<lpage>40</lpage>. <pub-id pub-id-type="doi">10.1109/TPAMI.2012.241</pub-id></citation></ref>
<ref id="B82"><label>82.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Han</surname><given-names>JJ</given-names></name><name><surname>Kurillo</surname><given-names>G</given-names></name><name><surname>Abresch</surname><given-names>RT</given-names></name><name><surname>Nicorici</surname><given-names>A</given-names></name><name><surname>Bajcsy</surname><given-names>R</given-names></name></person-group>. <article-title>Validity, reliability, and sensitivity of a 3d vision sensor-based upper extremity reachable workspace evaluation in neuromuscular diseases</article-title>. <source>PLoS Curr</source>. (<year>2013</year>) <volume>5</volume>. <pub-id pub-id-type="doi">10.1371/currents.md.f63ae7dde63caa718fa0770217c5a0e6</pub-id></citation></ref>
<ref id="B83"><label>83.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Olesh</surname><given-names>EV</given-names></name><name><surname>Yakovenko</surname><given-names>S</given-names></name><name><surname>Gritsenko</surname><given-names>V</given-names></name></person-group>. <article-title>Automated assessment of upper extremity movement impairment due to stroke</article-title>. <source>PLoS ONE</source>. (<year>2014</year>) <volume>9</volume>:<fpage>e104487</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0104487</pub-id><pub-id pub-id-type="pmid">25100036</pub-id></citation></ref>
<ref id="B84"><label>84.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname><given-names>Z</given-names></name><name><surname>Rafiei</surname><given-names>MH</given-names></name><name><surname>Hall</surname><given-names>A</given-names></name><name><surname>Thomas</surname><given-names>C</given-names></name><name><surname>Midtlien</surname><given-names>HA</given-names></name><name><surname>Hasselbach</surname><given-names>A</given-names></name></person-group>, et al. <article-title>A novel methodology for extracting and evaluating therapeutic movements in game-based motion capture rehabilitation systems</article-title>. <source>J Med Syst</source>. (<year>2018</year>) <volume>42</volume>:<fpage>1</fpage>&#x2013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1007/s10916-018-1113-4</pub-id></citation></ref>
<ref id="B85"><label>85.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Loper</surname><given-names>M</given-names></name><name><surname>Mahmood</surname><given-names>N</given-names></name><name><surname>Romero</surname><given-names>J</given-names></name><name><surname>Pons-Moll</surname><given-names>G</given-names></name><name><surname>Black</surname><given-names>MJ</given-names></name></person-group>. <article-title>SMPL: a skinned multi-person linear model</article-title>. <source>ACM Trans Graph (TOG)</source>. (<year>2015</year>) <volume>34</volume>:<fpage>1</fpage>&#x2013;<lpage>16</lpage>. <pub-id pub-id-type="doi">10.1145/2816795.2818013</pub-id></citation></ref>
<ref id="B86"><label>86.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lang</surname><given-names>CE</given-names></name><name><surname>Barth</surname><given-names>J</given-names></name><name><surname>Holleran</surname><given-names>CL</given-names></name><name><surname>Konrad</surname><given-names>JD</given-names></name><name><surname>Bland</surname><given-names>MD</given-names></name></person-group>. <article-title>Implementation of wearable sensing technology for movement: pushing forward into the routine physical rehabilitation care field</article-title>. <source>Sensors</source>. (<year>2020</year>) <volume>20</volume>:<fpage>5744</fpage>. <pub-id pub-id-type="doi">10.3390/s20205744</pub-id><pub-id pub-id-type="pmid">33050368</pub-id></citation></ref>
<ref id="B87"><label>87.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Kazanzides</surname><given-names>P</given-names></name><name><surname>Chen</surname><given-names>Z</given-names></name><name><surname>Deguet</surname><given-names>A</given-names></name><name><surname>Fischer</surname><given-names>GS</given-names></name><name><surname>Taylor</surname><given-names>RH</given-names></name><name><surname>DiMaio</surname><given-names>SP</given-names></name></person-group>. <comment>An open-source research kit for the da Vinci&#x00AE; surgical system. In: <italic>2014 IEEE international conference on robotics and automation (ICRA)</italic>. IEEE (2014). p. 6434&#x2013;6439</comment>. <pub-id pub-id-type="doi">10.1109/ICRA.2014.6907809</pub-id></citation></ref>
<ref id="B88"><label>88.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Alaker</surname><given-names>M</given-names></name><name><surname>Wynn</surname><given-names>GR</given-names></name><name><surname>Arulampalam</surname><given-names>T</given-names></name></person-group>. <article-title>Virtual reality training in laparoscopic surgery: a systematic review &#x0026; meta-analysis</article-title>. <source>Int J Surg</source>. (<year>2016</year>) <volume>29</volume>:<fpage>85</fpage>&#x2013;<lpage>94</lpage>. <pub-id pub-id-type="doi">10.1016/j.ijsu.2016.03.034</pub-id><pub-id pub-id-type="pmid">26992652</pub-id></citation></ref>
<ref id="B89"><label>89.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fard</surname><given-names>MJ</given-names></name><name><surname>Ameri</surname><given-names>S</given-names></name><name><surname>Darin Ellis</surname><given-names>R</given-names></name><name><surname>Chinnam</surname><given-names>RB</given-names></name><name><surname>Pandya</surname><given-names>AK</given-names></name><name><surname>Klein</surname><given-names>MD</given-names></name></person-group>. <article-title>Automated robot-assisted surgical skill evaluation: predictive analytics approach</article-title>. <source>Int J Med Robot Comput Assist Surg</source>. (<year>2018</year>) <volume>14</volume>:<fpage>e1850</fpage>. <pub-id pub-id-type="doi">10.1002/rcs.1850</pub-id></citation></ref>
<ref id="B90"><label>90.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ebina</surname><given-names>K</given-names></name><name><surname>Abe</surname><given-names>T</given-names></name><name><surname>Higuchi</surname><given-names>M</given-names></name><name><surname>Furumido</surname><given-names>J</given-names></name><name><surname>Iwahara</surname><given-names>N</given-names></name><name><surname>Kon</surname><given-names>M</given-names></name></person-group>, et al. <article-title>Motion analysis for better understanding of psychomotor skills in laparoscopy: objective assessment-based simulation training using animal organs</article-title>. <source>Surg Endosc</source>. (<year>2021</year>) <volume>35</volume>:<fpage>4399</fpage>&#x2013;<lpage>416</lpage>. <pub-id pub-id-type="doi">10.1007/s00464-020-07940-7</pub-id><pub-id pub-id-type="pmid">32909201</pub-id></citation></ref>
<ref id="B91"><label>91.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Gao</surname><given-names>Y</given-names></name><name><surname>Vedula</surname><given-names>SS</given-names></name><name><surname>Reiley</surname><given-names>CE</given-names></name><name><surname>Ahmidi</surname><given-names>N</given-names></name><name><surname>Varadarajan</surname><given-names>B</given-names></name><name><surname>Lin</surname><given-names>HC</given-names></name></person-group>, et al. <comment>JHU-ISI gesture and skill assessment working set (JIGSAWS): a surgical activity dataset for human motion modeling. In: <italic>MICCAI Workshop: M2CAI</italic>. Vol. 3. Boston, Massachusetts (2014). p. 3</comment>.</citation></ref>
<ref id="B92"><label>92.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ahmidi</surname><given-names>N</given-names></name><name><surname>Tao</surname><given-names>L</given-names></name><name><surname>Sefati</surname><given-names>S</given-names></name><name><surname>Gao</surname><given-names>Y</given-names></name><name><surname>Lea</surname><given-names>C</given-names></name><name><surname>Haro</surname><given-names>BB</given-names></name></person-group>, et al. <article-title>A dataset and benchmarks for segmentation and recognition of gestures in robotic surgery</article-title>. <source>IEEE Trans Biomed Eng</source>. (<year>2017</year>) <volume>64</volume>:<fpage>2025</fpage>&#x2013;<lpage>41</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2016.2647680</pub-id><pub-id pub-id-type="pmid">28060703</pub-id></citation></ref>
<ref id="B93"><label>93.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Takayasu</surname><given-names>K</given-names></name><name><surname>Yoshida</surname><given-names>K</given-names></name><name><surname>Mishima</surname><given-names>T</given-names></name><name><surname>Watanabe</surname><given-names>M</given-names></name><name><surname>Matsuda</surname><given-names>T</given-names></name><name><surname>Kinoshita</surname><given-names>H</given-names></name></person-group>. <article-title>Upper body position analysis of different experience level surgeons during laparoscopic suturing maneuvers using optical motion capture</article-title>. <source>Am J Surg</source>. (<year>2019</year>) <volume>217</volume>:<fpage>12</fpage>&#x2013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1016/j.amjsurg.2018.06.026</pub-id><pub-id pub-id-type="pmid">30017308</pub-id></citation></ref>
<ref id="B94"><label>94.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Impedovo</surname><given-names>D</given-names></name></person-group>. <article-title>Velocity-based signal features for the assessment of parkinsonian handwriting</article-title>. <source>IEEE Signal Process Lett</source>. (<year>2019</year>) <volume>26</volume>:<fpage>632</fpage>&#x2013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1109/LSP.2019.2902936</pub-id></citation></ref>
<ref id="B95"><label>95.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Asselborn</surname><given-names>T</given-names></name><name><surname>Gargot</surname><given-names>T</given-names></name><name><surname>Kidzi&#x0144;ski</surname><given-names>&#x0141;</given-names></name><name><surname>Johal</surname><given-names>W</given-names></name><name><surname>Cohen</surname><given-names>D</given-names></name><name><surname>Jolly</surname><given-names>C</given-names></name></person-group>, et al. <article-title>Automated human-level diagnosis of dysgraphia using a consumer tablet</article-title>. <source>NPJ Digit Med</source>. (<year>2018</year>) <volume>1</volume>(<issue>1</issue>). <pub-id pub-id-type="doi">10.1038/s41746-018-0049-x</pub-id><pub-id pub-id-type="pmid">31304322</pub-id></citation></ref>
<ref id="B96"><label>96.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kanko</surname><given-names>RM</given-names></name><name><surname>Laende</surname><given-names>E</given-names></name><name><surname>Selbie</surname><given-names>WS</given-names></name><name><surname>Deluzio</surname><given-names>KJ</given-names></name></person-group>. <article-title>Inter-session repeatability of markerless motion capture gait kinematics</article-title>. <source>J Biomech</source>. (<year>2021</year>) <volume>121</volume>:<fpage>110422</fpage>. <pub-id pub-id-type="doi">10.1016/j.jbiomech.2021.110422</pub-id><pub-id pub-id-type="pmid">33873117</pub-id></citation></ref>
<ref id="B97"><label>97.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Engdahl</surname><given-names>SM</given-names></name><name><surname>Gates</surname><given-names>DH</given-names></name></person-group>. <article-title>Reliability of upper limb movement quality metrics during everyday tasks</article-title>. <source>Gait Posture</source>. (<year>2019</year>) <volume>71</volume>:<fpage>253</fpage>&#x2013;<lpage>60</lpage>. <pub-id pub-id-type="doi">10.1016/j.gaitpost.2019.04.023</pub-id><pub-id pub-id-type="pmid">31096132</pub-id></citation></ref>
<ref id="B98"><label>98.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Lea</surname><given-names>C</given-names></name></person-group>. <comment><italic>Multi-modal models for fine-grained action segmentation in situated environments</italic> [PhD thesis]. Baltimore (MD): Johns Hopkins University</comment>.</citation></ref>
<ref id="B99"><label>99.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname><given-names>H-B</given-names></name><name><surname>Zhang</surname><given-names>Y-X</given-names></name><name><surname>Zhong</surname><given-names>B</given-names></name><name><surname>Lei</surname><given-names>Q</given-names></name><name><surname>Yang</surname><given-names>L</given-names></name><name><surname>Du</surname><given-names>J-X</given-names></name></person-group>, et al. <article-title>A comprehensive survey of vision-based human action recognition methods</article-title>. <source>Sensors</source>. (<year>2019</year>) <volume>19</volume>:<fpage>1005</fpage>. <pub-id pub-id-type="doi">10.3390/s19051005</pub-id><pub-id pub-id-type="pmid">30818796</pub-id></citation></ref>
<ref id="B100"><label>100.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hu</surname><given-names>X</given-names></name><name><surname>Dai</surname><given-names>J</given-names></name><name><surname>Li</surname><given-names>M</given-names></name><name><surname>Peng</surname><given-names>C</given-names></name><name><surname>Li</surname><given-names>Y</given-names></name><name><surname>Du</surname><given-names>S</given-names></name></person-group>. <article-title>Online human action detection and anticipation in videos: a survey</article-title>. <source>Neurocomputing</source>. (<year>2022</year>) <volume>491</volume>:<fpage>395</fpage>&#x2013;<lpage>413</lpage>. <pub-id pub-id-type="doi">10.1016/j.neucom.2022.03.069</pub-id></citation></ref>
<ref id="B101"><label>101.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kadu</surname><given-names>H</given-names></name><name><surname>Kuo</surname><given-names>C-CJ</given-names></name></person-group>. <article-title>Automatic human mocap data classification</article-title>. <source>IEEE Trans Multimed</source>. (<year>2014</year>) <volume>16</volume>:<fpage>2191</fpage>&#x2013;<lpage>202</lpage>. <pub-id pub-id-type="doi">10.1109/TMM.2014.2360793</pub-id></citation></ref>
<ref id="B102"><label>102.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Luo</surname><given-names>Z</given-names></name><name><surname>Xie</surname><given-names>W</given-names></name><name><surname>Kapoor</surname><given-names>S</given-names></name><name><surname>Liang</surname><given-names>Y</given-names></name><name><surname>Cooper</surname><given-names>M</given-names></name><name><surname>Niebles</surname><given-names>JC</given-names></name></person-group>, et al. <comment>MOMA: multi-object multi-actor activity parsing. In: <italic>Advances in neural information processing systems</italic>. Vol. 34. Curran Associates, Inc. (2021). p. 17939&#x2013;17955</comment>.</citation></ref>
<ref id="B103"><label>103.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>David</surname><given-names>A</given-names></name><name><surname>Subash</surname><given-names>T</given-names></name><name><surname>Varadhan</surname><given-names>S</given-names></name><name><surname>Melendez-Calderon</surname><given-names>A</given-names></name><name><surname>Balasubramanian</surname><given-names>S</given-names></name></person-group>. <article-title>A framework for sensor-based assessment of upper-limb functioning in hemiparesis</article-title>. <source>Front Hum Neurosci</source>. (<year>2021</year>) <volume>15</volume>:<fpage>667509</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2021.667509</pub-id><pub-id pub-id-type="pmid">34366809</pub-id></citation></ref>
<ref id="B104"><label>104.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Stein</surname><given-names>S</given-names></name><name><surname>McKenna</surname><given-names>SJ</given-names></name></person-group>. <comment>Combining embedded accelerometers with computer vision for recognizing food preparation activities. In: <italic>Proceedings of the 2013 ACM International Joint Conference on Pervasive, Ubiquitous Computing</italic>. Zurich, Switzerland (2013). p. 729&#x2013;738</comment>. <pub-id pub-id-type="doi">10.1145/2493432.2493482</pub-id>. <comment>Data set URL: <ext-link ext-link-type="uri" xlink:href="https://cvip.computing.dundee.ac.uk/datasets/foodpreparation/50salads/">https://cvip.computing.dundee.ac.uk/datasets/foodpreparation/50salads/</ext-link></comment></citation></ref>
<ref id="B105"><label>105.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Balasubramanian</surname><given-names>S</given-names></name><name><surname>Melendez-Calderon</surname><given-names>A</given-names></name><name><surname>Roby-Brami</surname><given-names>A</given-names></name><name><surname>Burdet</surname><given-names>E</given-names></name></person-group>. <article-title>On the analysis of movement smoothness</article-title>. <source>J Neuroeng Rehabil</source>. (<year>2015</year>) <volume>12</volume>(<issue>1</issue>). <pub-id pub-id-type="doi">10.1186/s12984-015-0090-9</pub-id><pub-id pub-id-type="pmid">26651329</pub-id></citation></ref>
<ref id="B106"><label>106.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Sakai</surname><given-names>H</given-names></name><name><surname>Furui</surname><given-names>A</given-names></name><name><surname>Hama</surname><given-names>S</given-names></name><name><surname>Yanagawa</surname><given-names>A</given-names></name><name><surname>Kubo</surname><given-names>K</given-names></name><name><surname>Morisako</surname><given-names>Y</given-names></name></person-group>, et al. <comment>Pen-point trajectory analysis during trail making test based on a time base generator model. In: <italic>2021 43rd Annual International Conference of the IEEE Engineering in Medicine &#x0026; Biology Society (EMBC)</italic>. IEEE (2021). p. 6215&#x2013;6219</comment>. <pub-id pub-id-type="doi">10.1109/EMBC46164.2021.9629991</pub-id></citation></ref>
<ref id="B107"><label>107.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Zia</surname><given-names>A</given-names></name><name><surname>Sharma</surname><given-names>Y</given-names></name><name><surname>Bettadapura</surname><given-names>V</given-names></name><name><surname>Sarin</surname><given-names>EL</given-names></name><name><surname>Clements</surname><given-names>MA</given-names></name><name><surname>Essa</surname><given-names>I</given-names></name></person-group>. <comment>Automated assessment of surgical skills using frequency analysis. In: <italic>International Conference on Medical Image Computing and Computer-Assisted Intervention &#x2013; MICCAI 2015</italic>. Springer (2015). p. 430&#x2013;438</comment>. <pub-id pub-id-type="doi">10.1007/978-3-319-24553-9_53</pub-id></citation></ref>
<ref id="B108"><label>108.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Lin</surname><given-names>J</given-names></name></person-group>. <comment><italic>Temporal segmentation of human motion for rehabilitation</italic> [PhD thesis]. Ontario, Canada: University of Waterloo</comment>.</citation></ref>
<ref id="B109"><label>109.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kulic</surname><given-names>D</given-names></name><name><surname>Takano</surname><given-names>W</given-names></name><name><surname>Nakamura</surname><given-names>Y</given-names></name></person-group>. <article-title>Online segmentation and clustering from continuous observation of whole body motions</article-title>. <source>IEEE Trans Robot</source>. (<year>2009</year>) <volume>25</volume>:<fpage>1158</fpage>&#x2013;<lpage>66</lpage>. <pub-id pub-id-type="doi">10.1109/TRO.2009.2026508</pub-id></citation></ref>
<ref id="B110"><label>110.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Kuehne</surname><given-names>H</given-names></name><name><surname>Arslan</surname><given-names>A</given-names></name><name><surname>Serre</surname><given-names>T</given-names></name></person-group>. <comment>The language of actions: recovering the syntax and semantics of goal-directed human activities. In: <italic>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition</italic>. Columbus, Ohio (2014). p. 780&#x2013;787</comment>. <pub-id pub-id-type="doi">10.1109/CVPR.2014.105</pub-id>. <comment>Data set URL: <ext-link ext-link-type="uri" xlink:href="https://serre-lab.clps.brown.edu/resource/breakfast-actions-dataset/">https://serre-lab.clps.brown.edu/resource/breakfast-actions-dataset/</ext-link></comment></citation></ref>
<ref id="B111"><label>111.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Li</surname><given-names>Y</given-names></name><name><surname>Liu</surname><given-names>M</given-names></name><name><surname>Rehg</surname><given-names>JM</given-names></name></person-group>. <comment>In the eye of beholder: joint learning of gaze and actions in first person video. In: <italic>Proceedings of the European Conference on Computer Vision (ECCV)</italic>. Munich, Germany (2018). p. 619&#x2013;635</comment>. <pub-id pub-id-type="doi">10.1007/978-3-030-01228-1_38</pub-id>. <comment>Data set URL: <ext-link ext-link-type="uri" xlink:href="https://cbs.ic.gatech.edu/fpv/">https://cbs.ic.gatech.edu/fpv/</ext-link></comment></citation></ref>
<ref id="B112"><label>112.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Tenorth</surname><given-names>M</given-names></name><name><surname>Bandouch</surname><given-names>J</given-names></name><name><surname>Beetz</surname><given-names>M</given-names></name></person-group>. <comment>The TUM kitchen data set of everyday manipulation activities for motion tracking and action recognition. In: <italic>2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops</italic>. IEEE (2009). p. 1089&#x2013;1096</comment>. <pub-id pub-id-type="doi">10.1109/ICCVW.2009.5457583</pub-id>. <comment>Data set URL: <ext-link ext-link-type="uri" xlink:href="https://ias.in.tum.de/dokuwiki/software/kitchen-activity-data">https://ias.in.tum.de/dokuwiki/software/kitchen-activity-data</ext-link></comment></citation></ref>
<ref id="B113"><label>113.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Parsa</surname><given-names>B</given-names></name><name><surname>Samani</surname><given-names>EU</given-names></name><name><surname>Hendrix</surname><given-names>R</given-names></name><name><surname>Devine</surname><given-names>C</given-names></name><name><surname>Singh</surname><given-names>SM</given-names></name><name><surname>Devasia</surname><given-names>S</given-names></name></person-group>, et al. <article-title>Toward ergonomic risk prediction via segmentation of indoor object manipulation actions using spatiotemporal convolutional networks</article-title>. <source>IEEE Robot Autom Lett</source>. (<year>2019</year>) <volume>4</volume>:<fpage>3153</fpage>&#x2013;<lpage>60</lpage>. <pub-id pub-id-type="doi">10.1109/LRA.2019.2925305</pub-id></citation></ref>
<ref id="B114"><label>114.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Niemann</surname><given-names>F</given-names></name><name><surname>Reining</surname><given-names>C</given-names></name><name><surname>Moya Rueda</surname><given-names>F</given-names></name><name><surname>Nair</surname><given-names>NR</given-names></name><name><surname>Steffens</surname><given-names>JA</given-names></name><name><surname>Fink</surname><given-names>GA</given-names></name></person-group>, et al. <article-title>LARa: creating a dataset for human activity recognition in logistics using semantic attributes</article-title>. <source>Sensors</source>. (<year>2020</year>) <volume>20</volume>:<fpage>4083</fpage>. <pub-id pub-id-type="doi">10.3390/s20154083</pub-id><pub-id pub-id-type="pmid">32707928</pub-id></citation></ref>
<ref id="B115"><label>115.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Niemann</surname><given-names>F</given-names></name><name><surname>L&#x00FC;dtke</surname><given-names>S</given-names></name><name><surname>Bartelt</surname><given-names>C</given-names></name><name><surname>Ten Hompel</surname><given-names>M</given-names></name></person-group>. <article-title>Context-aware human activity recognition in industrial processes</article-title>. <source>Sensors</source>. (<year>2022</year>) <volume>22</volume>:<fpage>134</fpage>. <pub-id pub-id-type="doi">10.3390/s22010134</pub-id></citation></ref>
<ref id="B116"><label>116.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Li</surname><given-names>A</given-names></name><name><surname>Thotakuri</surname><given-names>M</given-names></name><name><surname>Ross</surname><given-names>DA</given-names></name><name><surname>Carreira</surname><given-names>J</given-names></name><name><surname>Vostrikov</surname><given-names>A</given-names></name><name><surname>Zisserman</surname><given-names>A</given-names></name></person-group>. <comment>The AVA-Kinetics localized human actions video dataset [Preprint] (2020). Available at: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.48550/arXiv.2005.00214">https://doi.org/10.48550/arXiv.2005.00214</ext-link>. Data set URL: <ext-link ext-link-type="uri" xlink:href="https://research.google.com/ava/">https://research.google.com/ava/</ext-link></comment></citation></ref>
<ref id="B117"><label>117.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Goyal</surname><given-names>R</given-names></name><name><surname>Ebrahimi Kahou</surname><given-names>S</given-names></name><name><surname>Michalski</surname><given-names>V</given-names></name><name><surname>Materzynska</surname><given-names>J</given-names></name><name><surname>Westphal</surname><given-names>S</given-names></name><name><surname>Kim</surname><given-names>H</given-names></name></person-group>, et al. <comment>The &#x201C;something something&#x201D; video database for learning and evaluating visual common sense. In: <italic>Proceedings of the IEEE international conference on computer vision</italic>. Venice, Italy (2017). p. 5842&#x2013;5850</comment>. <pub-id pub-id-type="doi">10.1109/ICCV.2017.622</pub-id>. <comment>Data set URL: <ext-link ext-link-type="uri" xlink:href="https://developer.qualcomm.com/software/ai-datasets/something-something">https://developer.qualcomm.com/software/ai-datasets/something-something</ext-link></comment></citation></ref>
<ref id="B118"><label>118.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Mahdisoltani</surname><given-names>F</given-names></name><name><surname>Berger</surname><given-names>G</given-names></name><name><surname>Gharbieh</surname><given-names>W</given-names></name><name><surname>Fleet</surname><given-names>D</given-names></name><name><surname>Memisevic</surname><given-names>R</given-names></name></person-group>. <comment>On the effectiveness of task granularity for transfer learning [Preprint] (2018). Available at: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.48550/arXiv.1804.09235">https://doi.org/10.48550/arXiv.1804.09235</ext-link></comment></citation></ref>
<ref id="B119"><label>119.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Kuehne</surname><given-names>H</given-names></name><name><surname>Jhuang</surname><given-names>H</given-names></name><name><surname>Garrote</surname><given-names>E</given-names></name><name><surname>Poggio</surname><given-names>T</given-names></name><name><surname>Serre</surname><given-names>T</given-names></name></person-group>. <comment>HMDB: a large video database for human motion recognition. In: <italic>Proceedings of the 2011 International Conference on Computer Vision (ICCV)</italic>. Barcelona, Spain (2011)</comment>. <pub-id pub-id-type="doi">10.1109/ICCV.2011.6126543</pub-id>. <comment>Data set URL: <ext-link ext-link-type="uri" xlink:href="https://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/">https://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/</ext-link></comment></citation></ref>
<ref id="B120"><label>120.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Soomro</surname><given-names>K</given-names></name><name><surname>Zamir</surname><given-names>AR</given-names></name><name><surname>Shah</surname><given-names>M</given-names></name></person-group>. <comment>UCF101: a dataset of 101 human actions classes from videos in the wild [Preprint] (2012). Available at: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.48550/arXiv.1212.0402">https://doi.org/10.48550/arXiv.1212.0402</ext-link></comment></citation></ref>
<ref id="B121"><label>121.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Ji</surname><given-names>J</given-names></name><name><surname>Krishna</surname><given-names>R</given-names></name><name><surname>Fei-Fei</surname><given-names>L</given-names></name><name><surname>Niebles</surname><given-names>JC</given-names></name></person-group>. <comment>Action genome: actions as compositions of spatio-temporal scene graphs. In: <italic>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</italic>. Virtual (2020). p. 10236&#x2013;10247</comment>.</citation></ref>
<ref id="B122"><label>122.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Grauman</surname><given-names>K</given-names></name><name><surname>Westbury</surname><given-names>A</given-names></name><name><surname>Byrne</surname><given-names>E</given-names></name><name><surname>Chavis</surname><given-names>Z</given-names></name><name><surname>Furnari</surname><given-names>A</given-names></name><name><surname>Girdhar</surname><given-names>R</given-names></name></person-group>, et al. <comment>Ego4D: around the world in 3,000&#x2009;h of egocentric video. In: <italic>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)</italic>. New Orleans, Louisiana (2022). p. 18995&#x2013;19012</comment>. <pub-id pub-id-type="doi">10.1109/CVPR52688.2022.01842</pub-id>. <comment>Data set URL: <ext-link ext-link-type="uri" xlink:href="https://ego4d-data.org/">https://ego4d-data.org/</ext-link></comment></citation></ref>
<ref id="B123"><label>123.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Li</surname><given-names>C</given-names></name><name><surname>Zhang</surname><given-names>R</given-names></name><name><surname>Wong</surname><given-names>J</given-names></name><name><surname>Gokmen</surname><given-names>C</given-names></name><name><surname>Srivastava</surname><given-names>S</given-names></name><name><surname>Mart&#x00ED;n-Mart&#x00ED;n</surname><given-names>R</given-names></name></person-group>, et al. <comment>BEHAVIOR-1K: a benchmark for embodied AI with 1,000 everyday activities and realistic simulation. In: <italic>6th Annual Conference on Robot Learning</italic>. Auckland, New Zealand (2022). Data set URL: <ext-link ext-link-type="uri" xlink:href="https://behavior.stanford.edu/">https://behavior.stanford.edu/</ext-link></comment></citation></ref>
<ref id="B124"><label>124.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Lea</surname><given-names>C</given-names></name><name><surname>Hager</surname><given-names>GD</given-names></name><name><surname>Vidal</surname><given-names>R</given-names></name></person-group>. <comment>An improved model for segmentation and recognition of fine-grained activities with application to surgical training tasks. In: <italic>2015 IEEE Winter Conference on Applications of Computer Vision</italic>. IEEE (2015). p. 1123&#x2013;1129</comment>. <pub-id pub-id-type="doi">10.1109/WACV.2015.154</pub-id></citation></ref>
<ref id="B125"><label>125.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Lea</surname><given-names>C</given-names></name><name><surname>Vidal</surname><given-names>R</given-names></name><name><surname>Hager</surname><given-names>GD</given-names></name></person-group>. <comment>Learning convolutional action primitives for fine-grained action recognition. In: <italic>2016 IEEE International Conference on Robotics and Automation (ICRA)</italic>. IEEE (2016). p. 1642&#x2013;1649</comment>. <pub-id pub-id-type="doi">10.1109/ICRA.2016.7487305</pub-id></citation></ref>
<ref id="B126"><label>126.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Quellec</surname><given-names>G</given-names></name><name><surname>Lamard</surname><given-names>M</given-names></name><name><surname>Cochener</surname><given-names>B</given-names></name><name><surname>Cazuguel</surname><given-names>G</given-names></name></person-group>. <article-title>Real-time segmentation and recognition of surgical tasks in cataract surgery videos</article-title>. <source>IEEE Trans Med Imaging</source>. (<year>2014</year>) <volume>33</volume>:<fpage>2352</fpage>&#x2013;<lpage>60</lpage>. <pub-id pub-id-type="doi">10.1109/TMI.2014.2340473</pub-id><pub-id pub-id-type="pmid">25055383</pub-id></citation></ref>
<ref id="B127"><label>127.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Forestier</surname><given-names>G</given-names></name><name><surname>Petitjean</surname><given-names>F</given-names></name><name><surname>Senin</surname><given-names>P</given-names></name><name><surname>Despinoy</surname><given-names>F</given-names></name><name><surname>Huaulm&#x00E9;</surname><given-names>A</given-names></name><name><surname>Fawaz</surname><given-names>HI</given-names></name></person-group>, et al. <article-title>Surgical motion analysis using discriminative interpretable patterns</article-title>. <source>Artif Intell Med</source>. (<year>2018</year>) <volume>91</volume>:<fpage>3</fpage>&#x2013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1016/j.artmed.2018.08.002</pub-id><pub-id pub-id-type="pmid">30172445</pub-id></citation></ref>
<ref id="B128"><label>128.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Lei</surname><given-names>J</given-names></name><name><surname>Ren</surname><given-names>X</given-names></name><name><surname>Fox</surname><given-names>D</given-names></name></person-group>. <comment>Fine-grained kitchen activity recognition using RGB-D. In: <italic>Proceedings of the 2012 ACM Conference on Ubiquitous Computing</italic>. Pittsburgh, Pennsylvania (2012). p. 208&#x2013;211</comment>. <pub-id pub-id-type="doi">10.1145/2370216.2370248</pub-id></citation></ref>
<ref id="B129"><label>129.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Krishnan</surname><given-names>S</given-names></name><name><surname>Garg</surname><given-names>A</given-names></name><name><surname>Patil</surname><given-names>S</given-names></name><name><surname>Lea</surname><given-names>C</given-names></name><name><surname>Hager</surname><given-names>G</given-names></name><name><surname>Abbeel</surname><given-names>P</given-names></name></person-group>, et al. <article-title>Transition state clustering: unsupervised surgical trajectory segmentation for robot learning</article-title>. <source>Int J Rob Res</source>. (<year>2017</year>) <volume>36</volume>:<fpage>1595</fpage>&#x2013;<lpage>618</lpage>. <pub-id pub-id-type="doi">10.1177/0278364917743319</pub-id></citation></ref>
<ref id="B130"><label>130.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kr&#x00FC;ger</surname><given-names>B</given-names></name><name><surname>V&#x00F6;gele</surname><given-names>A</given-names></name><name><surname>Willig</surname><given-names>T</given-names></name><name><surname>Yao</surname><given-names>A</given-names></name><name><surname>Klein</surname><given-names>R</given-names></name><name><surname>Weber</surname><given-names>A</given-names></name></person-group>. <article-title>Efficient unsupervised temporal segmentation of motion data</article-title>. <source>IEEE Trans Multimed</source>. (<year>2016</year>) <volume>19</volume>:<fpage>797</fpage>&#x2013;<lpage>812</lpage>. <pub-id pub-id-type="doi">10.1109/TMM.2016.2635030</pub-id></citation></ref>
<ref id="B131"><label>131.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Despinoy</surname><given-names>F</given-names></name><name><surname>Bouget</surname><given-names>D</given-names></name><name><surname>Forestier</surname><given-names>G</given-names></name><name><surname>Penet</surname><given-names>C</given-names></name><name><surname>Zemiti</surname><given-names>N</given-names></name><name><surname>Poignet</surname><given-names>P</given-names></name></person-group>, et al. <article-title>Unsupervised trajectory segmentation for surgical gesture recognition in robotic training</article-title>. <source>IEEE Trans Biomed Eng</source>. (<year>2015</year>) <volume>63</volume>:<fpage>1280</fpage>&#x2013;<lpage>91</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2015.2493100</pub-id><pub-id pub-id-type="pmid">26513773</pub-id></citation></ref>
<ref id="B132"><label>132.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname><given-names>X</given-names></name><name><surname>Santago II</surname><given-names>AC</given-names></name><name><surname>Vidt</surname><given-names>ME</given-names></name><name><surname>Saul</surname><given-names>KR</given-names></name></person-group>. <article-title>Analysis of effects of loading and postural demands on upper limb reaching in older adults using statistical parametric mapping</article-title>. <source>J Biomech</source>. (<year>2016</year>) <volume>49</volume>:<fpage>2806</fpage>&#x2013;<lpage>16</lpage>. <pub-id pub-id-type="doi">10.1016/j.jbiomech.2016.06.018</pub-id><pub-id pub-id-type="pmid">27435566</pub-id></citation></ref>
<ref id="B133"><label>133.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>van Vliet</surname><given-names>P</given-names></name><name><surname>Pelton</surname><given-names>TA</given-names></name><name><surname>Hollands</surname><given-names>KL</given-names></name><name><surname>Carey</surname><given-names>L</given-names></name><name><surname>Wing</surname><given-names>AM</given-names></name></person-group>. <article-title>Neuroscience findings on coordination of reaching to grasp an object: implications for research</article-title>. <source>Neurorehabil Neural Repair</source>. (<year>2013</year>) <volume>27</volume>:<fpage>622</fpage>&#x2013;<lpage>35</lpage>. <pub-id pub-id-type="doi">10.1177/1545968313483578</pub-id><pub-id pub-id-type="pmid">23569173</pub-id></citation></ref>
<ref id="B134"><label>134.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Popa</surname><given-names>D</given-names></name><name><surname>Simion</surname><given-names>G</given-names></name><name><surname>Gui</surname><given-names>V</given-names></name><name><surname>Otesteanu</surname><given-names>M</given-names></name></person-group>. <article-title>Real time trajectory based hand gesture recognition</article-title>. <source>WSEAS Trans Inf Sci Appl</source>. (<year>2008</year>) <volume>5</volume>:<fpage>532</fpage>&#x2013;<lpage>46</lpage>. Available at: <ext-link ext-link-type="uri" xlink:href="https://dl.acm.org/doi/abs/10.5555/1481952.1481972">https://dl.acm.org/doi/abs/10.5555/1481952.1481972</ext-link></citation></ref>
<ref id="B135"><label>135.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Mao</surname><given-names>R</given-names></name><name><surname>Yang</surname><given-names>Y</given-names></name><name><surname>Ferm&#x00FC;ller</surname><given-names>C</given-names></name><name><surname>Aloimonos</surname><given-names>Y</given-names></name><name><surname>Baras</surname><given-names>JS</given-names></name></person-group>. <comment>Learning hand movements from markerless demonstrations for humanoid tasks. In: <italic>2014 IEEE-RAS International Conference on Humanoid Robots</italic>. IEEE (2014). p. 938&#x2013;943</comment>. <pub-id pub-id-type="doi">10.1109/HUMANOIDS.2014.7041476</pub-id></citation></ref>
<ref id="B136"><label>136.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Lopes</surname><given-names>DS</given-names></name><name><surname>Faria</surname><given-names>A</given-names></name><name><surname>Barriga</surname><given-names>A</given-names></name><name><surname>Caneira</surname><given-names>S</given-names></name><name><surname>Baptista</surname><given-names>F</given-names></name><name><surname>Matos</surname><given-names>C</given-names></name></person-group>, et al. <comment>Visual biofeedback for upper limb compensatory movements: a preliminary study next to rehabilitation professionals. In: <italic>EuroVis 2019 - Posters</italic>. Porto, Portugal (2019). p. 33&#x2013;35</comment>. <pub-id pub-id-type="doi">10.2312/eurp.20191139</pub-id></citation></ref>
<ref id="B137"><label>137.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Adans-Dester</surname><given-names>C</given-names></name><name><surname>Hankov</surname><given-names>N</given-names></name><name><surname>O&#x2019;Brien</surname><given-names>A</given-names></name><name><surname>Vergara-Diaz</surname><given-names>G</given-names></name><name><surname>Black-Schaffer</surname><given-names>R</given-names></name><name><surname>Zafonte</surname><given-names>R</given-names></name></person-group>, et al. <article-title>Enabling precision rehabilitation interventions using wearable sensors and machine learning to track motor recovery</article-title>. <source>NPJ Digit Med</source>. (<year>2020</year>) <volume>3</volume>(<issue>1</issue>). <pub-id pub-id-type="doi">10.1038/s41746-020-00328-w</pub-id><pub-id pub-id-type="pmid">33024831</pub-id></citation></ref>
<ref id="B138"><label>138.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Barth</surname><given-names>J</given-names></name><name><surname>Waddell</surname><given-names>KJ</given-names></name><name><surname>Bland</surname><given-names>MD</given-names></name><name><surname>Lang</surname><given-names>CE</given-names></name></person-group>. <article-title>Accuracy of an algorithm in predicting upper limb functional capacity in a United States population</article-title>. <source>Arch Phys Med Rehabil</source>. (<year>2022</year>) <volume>103</volume>:<fpage>44</fpage>&#x2013;<lpage>51</lpage>. <pub-id pub-id-type="doi">10.1016/j.apmr.2021.07.808</pub-id><pub-id pub-id-type="pmid">34425091</pub-id></citation></ref>
<ref id="B139"><label>139.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jiang</surname><given-names>J</given-names></name><name><surname>Xing</surname><given-names>Y</given-names></name><name><surname>Wang</surname><given-names>S</given-names></name><name><surname>Liang</surname><given-names>K</given-names></name></person-group>. <article-title>Evaluation of robotic surgery skills using dynamic time warping</article-title>. <source>Comput Methods Programs Biomed</source>. (<year>2017</year>) <volume>152</volume>:<fpage>71</fpage>&#x2013;<lpage>83</lpage>. <pub-id pub-id-type="doi">10.1016/j.cmpb.2017.09.007</pub-id><pub-id pub-id-type="pmid">29054262</pub-id></citation></ref>
<ref id="B140"><label>140.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Thies</surname><given-names>SB</given-names></name><name><surname>Kenney</surname><given-names>LP</given-names></name><name><surname>Sobuh</surname><given-names>M</given-names></name><name><surname>Galpin</surname><given-names>A</given-names></name><name><surname>Kyberd</surname><given-names>P</given-names></name><name><surname>Stine</surname><given-names>R</given-names></name></person-group>, et al. <article-title>Skill assessment in upper limb myoelectric prosthesis users: validation of a clinically feasible method for characterising upper limb temporal and amplitude variability during the performance of functional tasks</article-title>. <source>Med Eng Phys</source>. (<year>2017</year>) <volume>47</volume>:<fpage>137</fpage>&#x2013;<lpage>43</lpage>. <pub-id pub-id-type="doi">10.1016/j.medengphy.2017.03.010</pub-id><pub-id pub-id-type="pmid">28684214</pub-id></citation></ref>
<ref id="B141"><label>141.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Ahmidi</surname><given-names>N</given-names></name><name><surname>Gao</surname><given-names>Y</given-names></name><name><surname>B&#x00E9;jar</surname><given-names>B</given-names></name><name><surname>Vedula</surname><given-names>SS</given-names></name><name><surname>Khudanpur</surname><given-names>S</given-names></name><name><surname>Vidal</surname><given-names>R</given-names></name></person-group>, et al. <comment>String motif-based description of tool motion for detecting skill and gestures in robotic surgery. In: <italic>International Conference on Medical Image Computing and Computer-Assisted Intervention</italic>. Springer (2013). p. 26&#x2013;33</comment>. <pub-id pub-id-type="doi">10.1007/978-3-642-40811-3_4</pub-id></citation></ref>
<ref id="B142"><label>142.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>van der Maaten</surname><given-names>L</given-names></name><name><surname>Hinton</surname><given-names>G</given-names></name></person-group>. <article-title>Visualizing data using t-SNE</article-title>. <source>J Mach Learn Res</source>. (<year>2008</year>) <volume>9</volume>:<fpage>2579</fpage>&#x2013;<lpage>605</lpage>. Available at: <ext-link ext-link-type="uri" xlink:href="http://jmlr.org/papers/v9/vandermaaten08a.html">http://jmlr.org/papers/v9/vandermaaten08a.html</ext-link></citation></ref>
<ref id="B143"><label>143.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>McInnes</surname><given-names>L</given-names></name><name><surname>Healy</surname><given-names>J</given-names></name><name><surname>Melville</surname><given-names>J</given-names></name></person-group>. <comment>UMAP: uniform manifold approximation and projection for dimension reduction [Preprint] (2018). Available at: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.48550/arXiv.1802.03426">https://doi.org/10.48550/arXiv.1802.03426</ext-link></comment></citation></ref>
<ref id="B144"><label>144.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pereira</surname><given-names>CR</given-names></name><name><surname>Pereira</surname><given-names>DR</given-names></name><name><surname>Weber</surname><given-names>SA</given-names></name><name><surname>Hook</surname><given-names>C</given-names></name><name><surname>de Albuquerque</surname><given-names>VHC</given-names></name><name><surname>Papa</surname><given-names>JP</given-names></name></person-group>. <article-title>A survey on computer-assisted Parkinson&#x2019;s disease diagnosis</article-title>. <source>Artif Intell Med</source>. (<year>2019</year>) <volume>95</volume>:<fpage>48</fpage>&#x2013;<lpage>63</lpage>. <pub-id pub-id-type="doi">10.1016/j.artmed.2018.08.007</pub-id><pub-id pub-id-type="pmid">30201325</pub-id></citation></ref>
<ref id="B145"><label>145.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Angelillo</surname><given-names>MT</given-names></name><name><surname>Balducci</surname><given-names>F</given-names></name><name><surname>Impedovo</surname><given-names>D</given-names></name><name><surname>Pirlo</surname><given-names>G</given-names></name><name><surname>Vessio</surname><given-names>G</given-names></name></person-group>. <article-title>Attentional pattern classification for automatic dementia detection</article-title>. <source>IEEE Access</source>. (<year>2019</year>) <volume>7</volume>:<fpage>57706</fpage>&#x2013;<lpage>16</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2019.2913685</pub-id></citation></ref>
<ref id="B146"><label>146.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cilia</surname><given-names>ND</given-names></name><name><surname>De Gregorio</surname><given-names>G</given-names></name><name><surname>De Stefano</surname><given-names>C</given-names></name><name><surname>Fontanella</surname><given-names>F</given-names></name><name><surname>Marcelli</surname><given-names>A</given-names></name><name><surname>Parziale</surname><given-names>A</given-names></name></person-group>. <article-title>Diagnosing Alzheimer&#x2019;s disease from on-line handwriting: a novel dataset and performance benchmarking</article-title>. <source>Eng Appl Artif Intell</source>. (<year>2022</year>) <volume>111</volume>:<fpage>104822</fpage>. <pub-id pub-id-type="doi">10.1016/j.engappai.2022.104822</pub-id></citation></ref>
<ref id="B147"><label>147.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hughes</surname><given-names>CML</given-names></name><name><surname>Padilla</surname><given-names>A</given-names></name><name><surname>Hintze</surname><given-names>A</given-names></name><name><surname>Raymundo</surname><given-names>TM</given-names></name><name><surname>Sera</surname><given-names>M</given-names></name><name><surname>Weidner</surname><given-names>S</given-names></name></person-group>, et al. <article-title>Developing an mHealth app for post-stroke upper limb rehabilitation: feedback from US and Ethiopian rehabilitation clinicians</article-title>. <source>Health Informatics J</source>. (<year>2020</year>) <volume>26</volume>:<fpage>1104</fpage>&#x2013;<lpage>17</lpage>. <pub-id pub-id-type="doi">10.1177/1460458219868356</pub-id><pub-id pub-id-type="pmid">31566456</pub-id></citation></ref>
<ref id="B148"><label>148.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ploderer</surname><given-names>B</given-names></name><name><surname>Fong</surname><given-names>J</given-names></name><name><surname>Klaic</surname><given-names>M</given-names></name><name><surname>Nair</surname><given-names>S</given-names></name><name><surname>Vetere</surname><given-names>F</given-names></name><name><surname>Lizama</surname><given-names>LEC</given-names></name></person-group>, et al. <article-title>How therapists use visualizations of upper limb movement information from stroke patients: a qualitative study with simulated information</article-title>. <source>JMIR Rehabil Assist Technol</source>. (<year>2016</year>) <volume>3</volume>:<fpage>e6182</fpage>. <pub-id pub-id-type="doi">10.2196/rehab.6182</pub-id></citation></ref>
<ref id="B149"><label>149.</label><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jefford</surname><given-names>M</given-names></name><name><surname>Stockler</surname><given-names>MR</given-names></name><name><surname>Tattersall</surname><given-names>MH</given-names></name></person-group>. <article-title>Outcomes research: what is it and why does it matter?</article-title> <source>Intern Med J</source>. (<year>2003</year>) <volume>33</volume>:<fpage>110</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1046/j.1445-5994.2003.00302.x</pub-id><pub-id pub-id-type="pmid">12603584</pub-id></citation></ref>
<ref id="B150"><label>150.</label><citation citation-type="book"><collab>WHO</collab>. <source>Neurological disorders: public health challenges</source>. <publisher-loc>Geneva, Switzerland</publisher-loc>: <publisher-name>World Health Organization</publisher-name> (<year>2006</year>). <comment>Available from: <ext-link ext-link-type="uri" xlink:href="https://www.who.int/publications/i/item/9789241563369">https://www.who.int/publications/i/item/9789241563369</ext-link></comment></citation></ref>
<ref id="B151"><label>151.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>McDermott</surname><given-names>P</given-names></name><name><surname>Dominguez</surname><given-names>C</given-names></name><name><surname>Kasdaglis</surname><given-names>N</given-names></name><name><surname>Ryan</surname><given-names>M</given-names></name><name><surname>Trahan</surname><given-names>I</given-names></name><name><surname>Nelson</surname><given-names>A</given-names></name></person-group>. <comment><italic>Human-machine teaming systems engineering guide</italic> (Tech. rep.). Bedford (MA): The MITRE Corporation</comment>.</citation></ref>
<ref id="B152"><label>152.</label><citation citation-type="other"><person-group person-group-type="author"><name><surname>Morey</surname><given-names>DA</given-names></name><name><surname>Della Vella</surname><given-names>D</given-names></name><name><surname>Rayo</surname><given-names>MF</given-names></name><name><surname>Zelik</surname><given-names>DJ</given-names></name><name><surname>Murphy</surname><given-names>TB</given-names></name></person-group>. <comment>Joint activity testing: towards a multi-dimensional, high-resolution evaluation method for human-machine teaming. In: <italic>Proceedings of the Human Factors and Ergonomics Society Annual Meeting</italic>. Vol. 66. Los Angeles (CA): SAGE Publications Sage CA (2022). p. 2214&#x2013;2219</comment>. <pub-id pub-id-type="doi">10.1177/1071181322661537</pub-id></citation></ref></ref-list>
</back>
</article>