<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Bioeng. Biotechnol.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Bioengineering and Biotechnology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Bioeng. Biotechnol.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2296-4185</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1737916</article-id>
<article-id pub-id-type="doi">10.3389/fbioe.2025.1737916</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>OrientationNN: a physics-informed lightweight neural network for real-time joint kinematics estimation from IMU data</article-title>
<alt-title alt-title-type="left-running-head">Bian et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fbioe.2025.1737916">10.3389/fbioe.2025.1737916</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Bian</surname>
<given-names>Qingyao</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3179404"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review and editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Wang</surname>
<given-names>Hongbo</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3256807"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review and editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Alsayed</surname>
<given-names>Khalid</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3311555"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review and editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Ding</surname>
<given-names>Ziyun</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3265030"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review and editing</role>
</contrib>
</contrib-group>
<aff id="aff1">
<label>1</label>
<institution>School of Engineering, University of Birmingham</institution>, <city>Birmingham</city>, <country country="GB">United Kingdom</country>
</aff>
<aff id="aff2">
<label>2</label>
<institution>School of Automation Engineering, Nanjing University of Information Science and Technology</institution>, <city>Nanjing</city>, <country country="CN">China</country>
</aff>
<aff id="aff3">
<label>3</label>
<institution>Faculty of Medical Rehabilitation Science, King Abdulaziz University</institution>, <city>Jeddah</city>, <country country="SA">Saudi Arabia</country>
</aff>
<author-notes>
<corresp id="c001">
<label>&#x2a;</label>Correspondence: Ziyun Ding, <email xlink:href="mailto:z.ding@bham.ac.uk">z.ding@bham.ac.uk</email>
</corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-01-12">
<day>12</day>
<month>01</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2025</year>
</pub-date>
<volume>13</volume>
<elocation-id>1737916</elocation-id>
<history>
<date date-type="received">
<day>02</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>15</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>29</day>
<month>12</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Bian, Wang, Alsayed and Ding.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Bian, Wang, Alsayed and Ding</copyright-holder>
<license>
<ali:license_ref start_date="2026-01-12">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Accurate joint kinematics estimation is essential for understanding human movement and supporting biomechanical applications. Although optical motion capture systems are accurate, their high cost, complex setup, and limited portability restrict use outside laboratory environments. This study proposes a lightweight, physics-informed neural network for real-time joint kinematics estimation using inertial measurement units (IMUs).</p>
</sec>
<sec>
<title>Methods</title>
<p>We developed OrientationNN, which integrates orientation-based physical constraints into a compact multi-layer perceptron architecture to ensure biomechanically consistent joint kinematics estimation. The model was evaluated on a publicly available dataset and compared with a physics-based inverse kinematics framework (OpenSense) and conventional learning-based models including MLP, LSTM, CNN, and Transformer.</p>
</sec>
<sec>
<title>Results</title>
<p>OrientationNN achieved an average joint angle estimation error below 5&#x00B0; during ambulatory motion and consistently outperformed OpenSense across all kinematic variables. The model required only 4.9 &#x00D7; 10&#x00b3; FLOPs per frame and 10.8 KB of parameters, demonstrating high computational efficiency suitable for real-time applications.</p>
</sec>
<sec>
<title>Conclusion</title>
<p>OrientationNN enables accurate and computationally efficient joint kinematics estimation from IMU data. The results highlight its potential as a cost-effective and scalable solution for wearable biomechanical and motion analysis applications.</p>
</sec>
</abstract>
<kwd-group>
<kwd>biomechanics</kwd>
<kwd>IMU</kwd>
<kwd>joint kinematics estimation</kwd>
<kwd>lightweight</kwd>
<kwd>physics-informed neural network</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by the Engineering and Physical Sciences Research Council (EPSRC) under Grant EP/V057138/1.</funding-statement>
</funding-group>
<counts>
<fig-count count="5"/>
<table-count count="1"/>
<equation-count count="7"/>
<ref-count count="57"/>
<page-count count="11"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Biomechanics</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Accurate estimation of joint kinematics is fundamental to understanding human movement, advancing biomechanical modelling, and supporting the design, assessment, and optimisation of interventions and assistive technologies. When achieved in real time, such estimation enables personalised rehabilitation therapy (<xref ref-type="bibr" rid="B37">Patel et al., 2012</xref>) and closed-loop control of powered prostheses and exoskeletons (<xref ref-type="bibr" rid="B24">Kawamoto et al., 2003</xref>), contributing to objective management of motor recovery and functional performance (<xref ref-type="bibr" rid="B6">Baker, 2006</xref>). While optical motion capture systems remain the gold standard for laboratory-based motion analysis, their reliance on controlled environments, expensive infrastructure, and complex calibration procedures severely limits their applicability in clinical settings and daily-living scenarios (<xref ref-type="bibr" rid="B10">Cappozzo et al., 2005</xref>). This gap between laboratory precision and real-world accessibility has motivated the development of wearable sensing technologies for continuous motion monitoring.</p>
<p>Inertial measurement units (IMUs) have emerged as a promising alternative, offering compact form factors, low cost, and environmental independence (<xref ref-type="bibr" rid="B44">Roetenberg et al., 2009</xref>). Recent studies have demonstrated that IMU-based systems can achieve accuracies comparable to optical systems for gait analysis in controlled settings (<xref ref-type="bibr" rid="B15">Cutti et al., 2010</xref>; <xref ref-type="bibr" rid="B3">Al-Amri et al., 2018</xref>), with comparative validation studies showing strong agreement between inertial sensor-based and camera-based measurements during treadmill walking and running (<xref ref-type="bibr" rid="B36">N&#xfc;esch et al., 2017</xref>), and confirming concurrent validity and within-session reliability when proper calibration protocols are applied (<xref ref-type="bibr" rid="B7">Berner et al., 2020</xref>). However, translating IMU-derived orientations into anatomically meaningful joint angles remains a challenging inverse problem, particularly when sensor-to-segment misalignment, soft tissue artifacts, and calibration errors are present (<xref ref-type="bibr" rid="B13">Cooper et al., 2009</xref>).</p>
<p>Two primary paradigms have been developed to address this challenge: physics-based methods and data-driven machine learning approaches. Physics-based methods, such as OpenSense (<xref ref-type="bibr" rid="B16">Delp et al., 2007</xref>) and Xsens MVN (<xref ref-type="bibr" rid="B44">Roetenberg et al., 2009</xref>), employ biomechanical models combined with inverse kinematics optimisation to reconstruct joint angles from IMU orientations. By incorporating anatomical constraints &#x2014; including bone-to-bone articulation, segmental parameter consistency, and physiologically valid joint ranges of motion &#x2014; these methods ensure biomechanical plausibility. However, they require iterative numerical optimisation, which incurs substantial computational cost (often <inline-formula id="inf2">
<mml:math id="m2">
<mml:mrow>
<mml:mo>&#x3e;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mn>10</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>6</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> FLOPs per frame) (<xref ref-type="bibr" rid="B47">Seth et al., 2018</xref>). In addition to modelling errors, inaccuracies may also arise from IMU-related factors, including sensor characteristics, calibration errors, sensor placement variability, and soft tissue artefacts (<xref ref-type="bibr" rid="B46">Seel et al., 2014</xref>). Recent validation studies report root-mean-square errors (RMSEs) between 5<inline-formula id="inf3">
<mml:math id="m3">
<mml:mrow>
<mml:mo>&#xb0;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> and 10<inline-formula id="inf4">
<mml:math id="m4">
<mml:mrow>
<mml:mo>&#xb0;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> for walking tasks (<xref ref-type="bibr" rid="B16">Delp et al., 2007</xref>; <xref ref-type="bibr" rid="B26">Kok et al., 2017</xref>). However, such accuracy may be inadequate for clinical applications, including the physical assessment of disease severity, where the minimum detectable change is typically less than 5<inline-formula id="inf5">
<mml:math id="m5">
<mml:mrow>
<mml:mo>&#xb0;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> (<xref ref-type="bibr" rid="B38">Peters et al., 2021</xref>).</p>
<p>In contrast, data-driven machine learning approaches have demonstrated superior performance by learning direct mappings from IMU data to joint kinematics without explicit modelling assumptions. Recent advances include convolutional neural networks (CNNs) for spatial feature extraction (<xref ref-type="bibr" rid="B22">Huang et al., 2018</xref>; <xref ref-type="bibr" rid="B56">Yi et al., 2022</xref>), long short-term memory (LSTM) networks for temporal dependency modelling (<xref ref-type="bibr" rid="B35">Mundt et al., 2020</xref>; <xref ref-type="bibr" rid="B25">Khant et al., 2023</xref>), attention-based architectures for adaptive feature weighting (<xref ref-type="bibr" rid="B54">Wang et al., 2020</xref>), and transformer models for global sequence modelling (<xref ref-type="bibr" rid="B27">Kwon et al., 2020</xref>). These methods have achieved RMSEs below 3&#xb0; in controlled settings (<xref ref-type="bibr" rid="B35">Mundt et al., 2020</xref>; <xref ref-type="bibr" rid="B29">Lea et al., 2017</xref>; <xref ref-type="bibr" rid="B27">Kwon et al., 2020</xref>), outperforming physics-based approaches across walking, running, and stair ambulation tasks. Furthermore, hybrid sensor fusion approaches combining IMUs with surface electromyography (sEMG) have shown promise for capturing neuromuscular dynamics (<xref ref-type="bibr" rid="B39">Phinyomark et al., 2018</xref>; <xref ref-type="bibr" rid="B12">Chen et al., 2018</xref>).</p>
<p>Despite these impressive results, purely data-driven models face critical limitations that hinder their translation to clinical practice and wearable deployment. First, they lack physical interpretability &#x2014; operating as black boxes without guarantees of biomechanical consistency, which raises concerns for safety-critical applications such as prosthetic control (<xref ref-type="bibr" rid="B53">Tucker et al., 2020</xref>). Second, state-of-the-art architectures such as transformers demand substantial computational resources (<inline-formula id="inf6">
<mml:math id="m6">
<mml:mrow>
<mml:mo>&#x3e;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mn>10</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>6</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> FLOPs per frame and hundreds of kilobytes (KB) of parameters) (<xref ref-type="bibr" rid="B27">Kwon et al., 2020</xref>), rendering them impractical for real-time inference on resource-constrained edge devices such as microcontroller units (MCUs) embedded in wearable systems, where power budgets are typically limited to milliwatts (<xref ref-type="bibr" rid="B28">Lane et al., 2015</xref>; <xref ref-type="bibr" rid="B43">Reddi et al., 2020</xref>). Finally, their dependence on large labelled datasets obtained from expensive motion capture systems limits scalability and accessibility for clinical researchers.</p>
<p>In recent years, physics-informed neural networks (PINNs) have emerged as a framework that integrates physical laws with neural networks (<xref ref-type="bibr" rid="B41">Raissi et al., 2019</xref>; <xref ref-type="bibr" rid="B23">Karniadakis et al., 2021</xref>). By embedding physical laws, such as Newton-Euler equations, kinematic constraints, or conservation principles, into neural network architectures or loss functions, PINNs achieve improved data efficiency and physical consistency (<xref ref-type="bibr" rid="B14">Cuomo et al., 2022</xref>). Recent applications in fluid dynamics (<xref ref-type="bibr" rid="B42">Raissi et al., 2020</xref>), structural mechanics (<xref ref-type="bibr" rid="B57">Zhang et al., 2022</xref>), and robotics (<xref ref-type="bibr" rid="B55">Westenbroek et al., 2022</xref>) have demonstrated that physics-informed models can match or exceed purely data-driven approaches with orders of magnitude fewer parameters. However, despite growing interest in biomechanics applications (<xref ref-type="bibr" rid="B30">Linka et al., 2021</xref>), PINNs have rarely been applied to IMU-based joint kinematics estimation, and no existing work has systematically addressed the computational efficiency requirements for edge deployment in wearable systems.</p>
<p>To address these gaps, this study introduces OrientationNN, a lightweight physics-informed neural network that integrates orientation-based kinematic constraints with compact multi-layer perceptron (MLP) subnetworks for real-time lower limb joint angle estimation from IMU data. Unlike purely data-driven models that learn arbitrary input-output mappings, OrientationNN explicitly encodes the rotational relationships between adjacent body segments using learnable rotation matrices that represent sensor-to-segment calibrations, combined with dynamic MLP modules that capture subject-specific non-rigid motion artifacts. This hybrid architecture preserves biomechanical interpretability while achieving computational efficiency through modular, joint-specific processing with minimal parameter overhead.</p>
<p>Our research objectives are:<list list-type="order">
<list-item>
<p>To develop a lightweight physics-informed neural network (OrientationNN) that integrates orientation-based physical constraints with a compact MLP architecture for accurate (RMSEs below 5&#xb0;) and efficient (model size under 20&#xa0;KB) estimation of lower-limb joint kinematics from IMU data.</p>
</list-item>
<list-item>
<p>To analyse and compare the error distribution of the proposed OrientationNN and the physics-based biomechanics model (i.e., OpenSense, an IMU-driven inverse kinematics toolbox), providing insights into model behavior and optimisation throughout the gait cycle.</p>
</list-item>
<list-item>
<p>To ensure efficient, real-time, and edge-deployable kinematics estimation by reducing computational cost and parameter dependency.</p>
</list-item>
</list>
</p>
</sec>
<sec sec-type="methods" id="s2">
<label>2</label>
<title>Methods</title>
<sec id="s2-1">
<label>2.1</label>
<title>Problem statement</title>
<p>Our objective was to provide a high-accuracy and real-time automated estimation of lower limb joint kinematics using wearable IMU sensors.</p>
<p>We used a publicly available treadmill walking dataset comprising recordings from 14 healthy adults (7 males and 7 females), each conducting five approximately 7-min walking trials at a self-selected speed (<xref ref-type="bibr" rid="B5">Bailey et al., 2021</xref>). The dataset included measurements from eight IMU sensors (Xsens) mounted on the trunk, lower back, left and right thighs, left and right shanks, and left and right feet. In this study, the trunk IMU was excluded from modelling by following the previous work (<xref ref-type="bibr" rid="B32">Ma et al., 2025</xref>). The signals were sampled at 60&#xa0;Hz, resulting in around 25000 data points per 7-min trial, with variations depending on trial durations.</p>
<p>In addition, the dataset provides lower limb joint angles computed through OpenSim&#x2019;s inverse kinematics (IK) based on marker trajectory data. As OpenSim is a validated and widely accepted framework for estimating joint kinematics (<xref ref-type="bibr" rid="B21">Holzbaur et al., 2005</xref>), the OpenSim-derived joint angles were used as the reference standard for evaluating the proposed neural network model (<xref ref-type="fig" rid="F1">Figure 1</xref>).</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Schematic of the experimental setup and data acquisition system. IMU sensors were attached to the lower back, thighs, shanks, and feet to record lower limb kinematics, while a marker-based optical motion capture system was used to obtain the ground truth joint angles for model evaluation.</p>
</caption>
<graphic xlink:href="fbioe-13-1737916-g001.tif">
<alt-text content-type="machine-generated">Diagram illustrating the process of motion analysis. On the left, an AI model with interconnected nodes. In the center, a walking figure with IMU and marker points. Arrows indicate IMU orientations and marker trajectories leading to validation. On the right, a skeletal representation with the OpenSim logo under the label &#x22;IK Tool.&#x22;</alt-text>
</graphic>
</fig>
<p>Following data quality screening based on inter-system synchronisation accuracy, recordings with synchronisation errors exceeding 50&#xa0;ms were excluded. As a result, recordings from ten participants were retained for further analysis. Both IMU-driven (OpenSense) and optoelectronic-driven (OpenSim) inverse kinematics solutions were used for model comparison. The IMU data were synchronised with the marker-based data in MATLAB to eliminate signal delay.</p>
<p>Lower limb joint kinematics, including hip flexion, hip adduction, hip rotation, knee flexion, ankle dorsiflexion, and ankle inversion, were estimated. Neural network models were trained and evaluated under an intra-subject scenario, in which model training and testing were performed using data from the same individual. For each participant, the dataset was partitioned into training (60%), validation (20%), and test (20%) subsets. The training data were used to fit the model, the validation data to optimise the network architecture and hyperparameters, and the test data to evaluate final performance.</p>
</sec>
<sec id="s2-2">
<label>2.2</label>
<title>OrientationNN</title>
<sec id="s2-2-1">
<label>2.2.1</label>
<title>Model summary</title>
<p>We proposed OrientationNN, a lightweight architecture that integrates orientation-based physical information with compact multi-layer perceptrons (MLPs), ensuring computational efficiency and adherence to segmental constraints. The modelling process began by grouping adjacent IMU orientations as shown in <xref ref-type="fig" rid="F2">Figure 2b</xref> to calculate the corresponding joint angles. The model takes the orientations of two adjacent segments as input and estimates their relative rotation matrix, representing the corresponding joint motion (<xref ref-type="fig" rid="F2">Figure 2c</xref>). These joint motion matrices were converted into Euler angles to compute the loss function, as segmental kinematics are conventionally represented using Euler angles.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>OrientationNN architecture. <bold>(a)</bold> Model input. <bold>(b)</bold> Grouping of adjacent IMU orientations for joint kinematics estimation. <bold>(c)</bold> Computation flow from relative rotation matrices to Euler angles.</p>
</caption>
<graphic xlink:href="fbioe-13-1737916-g002.tif">
<alt-text content-type="machine-generated">Diagram illustrating a computational model in three sections. Section (a) shows a hierarchical structure with components labeled as Pelvis, ThighR, ThighL, ShankR, ShankL, FootR, and FootL. Section (b) lists joints as Hip, Knee, and Foot with number codes. Section (c) depicts network flows with RA and RB leading to &#x394;R pre, R pre, RA RB, and &#x394;R post, and calculating Loss eul from Euclidean distance between R pred and R true.</alt-text>
</graphic>
</fig>
<p>The proposed model incorporates joint-specific dynamic modules. Each joint employs a tiny MLP subnetwork (input &#x3d; 3, hidden &#x3d; 32, output &#x3d; 3) to predict dynamic offsets <inline-formula id="inf7">
<mml:math id="m7">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">pre</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf8">
<mml:math id="m8">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">post</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, which are combined with learnable static parameters (<inline-formula id="inf9">
<mml:math id="m9">
<mml:mrow>
<mml:mi mathvariant="normal">&#x394;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">pre</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf10">
<mml:math id="m10">
<mml:mrow>
<mml:mi mathvariant="normal">&#x394;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">post</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>) to form rotation matrix outputs, as described in the last section. Training is conducted for 100 epochs using the Adam optimizer with a learning rate of 0.005 (<xref ref-type="bibr" rid="B1">Kingma and Ba, 2014</xref>). The weighted EulerXYZ loss is selected as the loss function to enhance biomechanical consistency and joint-specific precision (<xref ref-type="bibr" rid="B11">Chen et al., 2011</xref>).</p>
</sec>
<sec id="s2-2-2">
<label>2.2.2</label>
<title>Basic module</title>
<p>Assume that we have two adjacent segment orientations <inline-formula id="inf11">
<mml:math id="m11">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf12">
<mml:math id="m12">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>, then the joint rotation matrix could be calculated as shown in <xref ref-type="disp-formula" rid="e1">Equation 1</xref>:<disp-formula id="e1">
<mml:math id="m13">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">joint</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msubsup>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2032;</mml:mo>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
</mml:math>
<label>(1)</label>
</disp-formula>
</p>
<p>where <inline-formula id="inf13">
<mml:math id="m14">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">joint</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the relative joint rotation matrix which corresponds to <inline-formula id="inf14">
<mml:math id="m15">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf15">
<mml:math id="m16">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
</sec>
<sec id="s2-2-3">
<label>2.2.3</label>
<title>Learnable static module</title>
<p>However, only IMU orientations are measured directly in the study. Thus, we added two learnable static rotation matrices to replace the sensor to segment orientations (<xref ref-type="bibr" rid="B35">Mundt et al., 2020</xref>). Then the joint rotation matrix can be calculated as shown in <xref ref-type="disp-formula" rid="e2">Equation 2</xref>:<disp-formula id="e2">
<mml:math id="m17">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">joint</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mi mathvariant="normal">&#x394;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">pre</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msubsup>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2032;</mml:mo>
</mml:mrow>
</mml:msubsup>
<mml:mi mathvariant="normal">&#x394;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">post</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
<label>(2)</label>
</disp-formula>
</p>
<p>where <inline-formula id="inf16">
<mml:math id="m18">
<mml:mrow>
<mml:mi mathvariant="normal">&#x394;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">pre</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf17">
<mml:math id="m19">
<mml:mrow>
<mml:mi mathvariant="normal">&#x394;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">post</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> are learnable rotation matrices functioning as sensor-to-segment orientations.</p>
</sec>
<sec id="s2-2-4">
<label>2.2.4</label>
<title>Dynamic module</title>
<p>To further help the model approximate the joint motion better, we also introduced an extra dynamic module containing tiny MLP models. Two dynamic orientation matrices were first calculated as shown in <xref ref-type="disp-formula" rid="e3">Equations 3</xref>, <xref ref-type="disp-formula" rid="e4">4</xref>:<disp-formula id="e3">
<mml:math id="m20">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">pre</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mtext>Exp</mml:mtext>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mtext>MLP</mml:mtext>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mtext>Log</mml:mtext>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
<label>(3)</label>
</disp-formula>
<disp-formula id="e4">
<mml:math id="m21">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">post</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mtext>Exp</mml:mtext>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mtext>MLP</mml:mtext>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mtext>Log</mml:mtext>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
<label>(4)</label>
</disp-formula>
</p>
<p>where <inline-formula id="inf18">
<mml:math id="m22">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">pre</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> and <inline-formula id="inf19">
<mml:math id="m23">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">post</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> are dynamic rotation matrices, <inline-formula id="inf20">
<mml:math id="m24">
<mml:mrow>
<mml:mtext>Exp</mml:mtext>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mo>.</mml:mo>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> is the function mapping so (3) to SO (3), and <inline-formula id="inf21">
<mml:math id="m25">
<mml:mrow>
<mml:mtext>Log</mml:mtext>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mo>.</mml:mo>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> is the function mapping SO (3) to so (3) (<xref ref-type="bibr" rid="B48">Sol&#xe0; et al., 2018</xref>). <inline-formula id="inf22">
<mml:math id="m26">
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> is the special orthogonal group of 3D rotation matrices, while <inline-formula id="inf23">
<mml:math id="m27">
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mi>o</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> denotes its corresponding Lie algebra of 3D rotation vectors.</p>
</sec>
<sec id="s2-2-5">
<label>2.2.5</label>
<title>Concatenation module</title>
<p>Then, the joint angles can be calculated as shown in <xref ref-type="disp-formula" rid="e5">Equations 5</xref>, <xref ref-type="disp-formula" rid="e6">6</xref>:<disp-formula id="e5">
<mml:math id="m28">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">joint</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">pre</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mi mathvariant="normal">&#x394;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">pre</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>A</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msubsup>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2032;</mml:mo>
</mml:mrow>
</mml:msubsup>
<mml:mi mathvariant="normal">&#x394;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">post</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">post</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
<label>(5)</label>
</disp-formula>
<disp-formula id="e6">
<mml:math id="m29">
<mml:mrow>
<mml:mi>e</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>l</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mtext>rot</mml:mtext>
<mml:mn>2</mml:mn>
<mml:mtext>eul</mml:mtext>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">joint</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
<label>(6)</label>
</disp-formula>
</p>
<p>where <inline-formula id="inf24">
<mml:math id="m30">
<mml:mrow>
<mml:mi>e</mml:mi>
<mml:mi>u</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> are Euler angles of the corresponding joint, <inline-formula id="inf25">
<mml:math id="m31">
<mml:mrow>
<mml:mtext>rot</mml:mtext>
<mml:mn>2</mml:mn>
<mml:mtext>eul</mml:mtext>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mo>.</mml:mo>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> is the function which converts a rotation matrix into Euler angles following the Z-Y-X sequence. This corresponds to the rotation convention adopted by OpenSim, ensuring a fair comparison between models (<xref ref-type="bibr" rid="B16">Delp et al., 2007</xref>).</p>
</sec>
<sec id="s2-2-6">
<label>2.2.6</label>
<title>Weighted Euler loss</title>
<p>The loss function is calculated as shown in <xref ref-type="disp-formula" rid="e7">Equation 7</xref>:<disp-formula id="e7">
<mml:math id="m32">
<mml:mrow>
<mml:mi>L</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>12</mml:mn>
</mml:mrow>
</mml:munderover>
</mml:mstyle>
<mml:msub>
<mml:mrow>
<mml:mi>w</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mi>e</mml:mi>
<mml:mi>u</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>l</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
<label>(7)</label>
</disp-formula>
</p>
<p>where <inline-formula id="inf26">
<mml:math id="m33">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>w</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the weight of the <inline-formula id="inf27">
<mml:math id="m34">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>t</mml:mi>
<mml:mi>h</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> channel, which was determined by motion range.</p>
</sec>
</sec>
<sec id="s2-3">
<label>2.3</label>
<title>Baseline machine learning models</title>
<p>For comparison with our proposed model OrientationNN, we adopted multiple neural network models from literature. The model architectures and hyperparameters are optimised using a framework named Optuna (<xref ref-type="bibr" rid="B2">Akiba et al., 2019</xref>), which adopted a Bayesian method to get the optimal architecture and hyperparameters for each model. The optimal hyperparameters of other machine learning models are listed in <xref ref-type="table" rid="T1">Table 1</xref>.</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Model architectures and training hyperparameters.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Model</th>
<th align="left">Architecture</th>
<th align="center">Learning rate</th>
<th align="center">Epochs</th>
<th align="center">Optimizer</th>
<th align="center">Loss</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">MLP</td>
<td align="left">2<inline-formula id="inf28">
<mml:math id="m35">
<mml:mrow>
<mml:mo>&#xd7;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 128 fully connected layers (ReLU &#x2b; dropout 0.2)</td>
<td align="center">0.01</td>
<td align="center">100</td>
<td align="center">Adam</td>
<td align="center">L1 loss</td>
</tr>
<tr>
<td align="left">LSTM</td>
<td align="left">1-Layer LSTM (dropout 0.2) &#x2b; linear</td>
<td align="center">0.01</td>
<td align="center">100</td>
<td align="center">Adam</td>
<td align="center">L1 loss</td>
</tr>
<tr>
<td align="left">CNN</td>
<td align="left">Conv2d (<inline-formula id="inf29">
<mml:math id="m36">
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, pad<inline-formula id="inf30">
<mml:math id="m37">
<mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) &#x2b; ReLU &#x2b; Dropout (0.2) &#x2b; flatten &#x2b; linear</td>
<td align="center">0.01</td>
<td align="center">100</td>
<td align="center">Adam</td>
<td align="center">Huber loss</td>
</tr>
<tr>
<td align="left">Transformer</td>
<td align="left">Linear &#x2b;3<inline-formula id="inf31">
<mml:math id="m38">
<mml:mrow>
<mml:mo>&#xd7;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> transformer EncoderLayer (<inline-formula id="inf32">
<mml:math id="m39">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mtext>model</mml:mtext>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>64</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf33">
<mml:math id="m40">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>n</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mtext>head</mml:mtext>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf34">
<mml:math id="m41">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mtext>ff</mml:mtext>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>256</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, dropout<inline-formula id="inf35">
<mml:math id="m42">
<mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.1</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) &#x2b; flatten &#x2b; linear</td>
<td align="center">0.005</td>
<td align="center">100</td>
<td align="center">Adam</td>
<td align="center">L1 loss</td>
</tr>
<tr>
<td align="left">OrientationNN</td>
<td align="left">Per-joint MLP (hidden<inline-formula id="inf36">
<mml:math id="m43">
<mml:mrow>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>32</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) &#x2b; learnable orientations</td>
<td align="center">0.005</td>
<td align="center">100</td>
<td align="center">Adam</td>
<td align="center">Euler loss</td>
</tr>
</tbody>
</table>
</table-wrap>
<sec id="s2-3-1">
<label>2.3.1</label>
<title>MLP</title>
<p>The MLP network comprises two fully connected hidden layers, each containing 128 neurons with rectified linear unit (ReLU) activation functions. To prevent overfitting, a dropout layer with a rate of 0.2 is applied after each hidden layer. The input layer receives 63 features, and the output layer produces 12 joint angle estimations. Model parameters are optimized using the Adam optimizer with a learning rate of 0.01 and a weight decay of <inline-formula id="inf37">
<mml:math id="m44">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:mn>1</mml:mn>
<mml:msup>
<mml:mrow>
<mml:mn>0</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>4</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>. The network is trained for 100 epochs with the mean absolute error (L1Loss) as the objective function.</p>
</sec>
<sec id="s2-3-2">
<label>2.3.2</label>
<title>LSTM</title>
<p>The LSTM model consists of a single recurrent layer with 128 hidden units and an input size of 63. A dropout rate of 0.2 is applied to the LSTM outputs to reduce overfitting. The final fully connected layer maps the 128-dimensional hidden representation to 12 output joint angles. Training is performed for 100 epochs using the Adam optimizer (learning rate &#x3d; 0.01, weight decay &#x3d; <inline-formula id="inf38">
<mml:math id="m45">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:mn>1</mml:mn>
<mml:msup>
<mml:mrow>
<mml:mn>0</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>4</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>) and the L1 Loss function (MAE).</p>
</sec>
<sec id="s2-3-3">
<label>2.3.3</label>
<title>CNN</title>
<p>The CNN model performs spatiotemporal feature extraction using a two-dimensional convolutional layer (Conv2D) with 64 filters and a kernel size of 3, followed by ReLU activation and a dropout rate of 0.2. The feature maps are then flattened and passed through a fully connected layer that outputs 12 joint angle predictions. The input tensor has dimensions of <inline-formula id="inf39">
<mml:math id="m46">
<mml:mrow>
<mml:mn>7</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:mn>3</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, corresponding to seven IMUs with three-axis features. Model training uses the Adam optimizer (learning rate &#x3d; 0.01, weight decay &#x3d; <inline-formula id="inf40">
<mml:math id="m47">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:mn>1</mml:mn>
<mml:msup>
<mml:mrow>
<mml:mn>0</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>4</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>) for 100 epochs with the Huber Loss function.</p>
</sec>
<sec id="s2-3-4">
<label>2.3.4</label>
<title>Transformer</title>
<p>The Transformer-based model begins with a linear projection layer that maps the input to a 64-dimensional embedding space. This embedding is processed by three Transformer encoder layers with a model dimension of 64, two attention heads, a feedforward dimension of 256, and a dropout rate of 0.1. The output sequence is flattened and passed through a final linear layer to produce 12 joint angle outputs. Training is performed for 100 epochs using the Adam optimizer (learning rate &#x3d; 0.005, weight decay &#x3d; <inline-formula id="inf41">
<mml:math id="m48">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:mn>1</mml:mn>
<mml:msup>
<mml:mrow>
<mml:mn>0</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>4</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>) and L1 Loss (MAE).</p>
</sec>
</sec>
<sec id="s2-4">
<label>2.4</label>
<title>Model evaluation</title>
<sec id="s2-4-1">
<label>2.4.1</label>
<title>Performance metrics</title>
<p>Model accuracy was assessed by using the root mean squared error (RMSE), which is defined as follows: <inline-formula id="inf42">
<mml:math id="m49">
<mml:mrow>
<mml:mtext>RMSE</mml:mtext>
<mml:mo>&#x3d;</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:msubsup>
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:msubsup>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo>&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:msqrt>
</mml:mrow>
</mml:math>
</inline-formula>, where <inline-formula id="inf43">
<mml:math id="m50">
<mml:mrow>
<mml:mover accent="true">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo>&#x302;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:math>
</inline-formula> is the estimated value of the i-th sample. To evaluate the model&#x2019;s suitability for edge deployment, we measured FLOPs and memory usage, using an NVIDIA RTX 3050 laptop GPU, which approximates the performance of widely used edge devices. Furthermore, to enable real-time joint kinematics estimation for feedback and to provide reliable input for assistive device control, several specific performance criteria were defined: <list list-type="simple">
<list-item>
<p>1. The estimation of joint angles should achieve an RMSE of less than 5<inline-formula id="inf44">
<mml:math id="m51">
<mml:mrow>
<mml:mo>&#xb0;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> to be considered reliable for detecting clinically meaningful changes in ambulatory joint angle differences (<xref ref-type="bibr" rid="B8">Bian et al., 2024</xref>; <xref ref-type="bibr" rid="B7">Berner et al., 2020</xref>; <xref ref-type="bibr" rid="B36">N&#xfc;esch et al., 2017</xref>).</p>
</list-item>
</list>
<list list-type="simple">
<list-item>
<p>2. Joint angle estimation should be feasible on resource-limited wearable devices; thus, the memory requirements and computational complexity of the solutions should be minimized. A practical efficiency threshold of <inline-formula id="inf45">
<mml:math id="m52">
<mml:mrow>
<mml:mo>&#x2264;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>20&#xa0;KB was adopted, considering that typical embedded platforms used in IMU-based gait systems (e.g., STM32 or ESP32 microcontrollers) provide only about 100&#x2013;512&#xa0;KB of RAM. This constraint ensures the model can be deployed entirely on-device without external memory access, while maintaining flexibility for future scaling.</p>
</list-item>
</list>
</p>
</sec>
<sec id="s2-4-2">
<label>2.4.2</label>
<title>Statistics</title>
<p>To evaluate the statistical significance of performance differences between OrientationNN and the baseline models, we conducted an independent samples t-test. Before performing the t-test, we assessed the normality of the performance metrics to ensure that the data met the assumption of a normal distribution. For each model (MLP, CNN, LSTM, Transformer, and OrientationNN), we trained 10 separate instances using optimised architectures and hyperparameters. The mean test performance across 10 participants was recorded for each training instance. This resulted in 10 independent performance values per model (N &#x3d; 10), which were then compared. We used MATLAB&#x2019;s t-test function to conduct a two-tailed independent samples t-test <inline-formula id="inf46">
<mml:math id="m53">
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>&#x3b1;</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.05</mml:mn>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> to compare the performance of OrientationNN against each baseline model. The null hypothesis stated that there was no significant difference between OrientationNN and the baseline models.</p>
</sec>
</sec>
</sec>
<sec sec-type="results" id="s3">
<label>3</label>
<title>Results</title>
<sec id="s3-1">
<label>3.1</label>
<title>OrientationNN vs. OpenSense</title>
<p>
<xref ref-type="fig" rid="F3">Figure 3</xref> shows that OrientationNN achieved significantly lower RMSEs than the OpenSense inverse kinematics approach across all 12 joint kinematic variables (p <inline-formula id="inf47">
<mml:math id="m54">
<mml:mrow>
<mml:mo>&#x3c;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.001). The average RMSEs of OrientationNN were below 5&#xb0;, whereas those of OpenSense ranged between 5&#xb0; and 10&#xb0;.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Comparison of average RMSE between OrientationNN and OpenSense across 12 lower limb joint angles.</p>
</caption>
<graphic xlink:href="fbioe-13-1737916-g003.tif">
<alt-text content-type="machine-generated">Bar chart comparing RMSE in degrees for OrientationNN (blue) and Opensense (orange) across various joint angles. Opensense consistently shows higher RMSE values. Significant differences are marked with asterisks.</alt-text>
</graphic>
</fig>
<p>The intra-subject evaluation demonstrated that OrientationNN achieved significantly lower RMSEs across all 12 kinematic variables compared with other machine learning models (p <inline-formula id="inf48">
<mml:math id="m55">
<mml:mrow>
<mml:mo>&#x3c;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.001). Specifically, the average errors (mean &#xb1; SD) for each variable were as follows: HipFlexR (2.59&#xb0; &#xb1; 0.08&#xb0;), HipAddR (2.19&#xb0; &#xb1; 0.15&#xb0;), HipRotR (2.07&#xb0; &#xb1; 0.13&#xb0;), KneeFlexR (3.22&#xb0; &#xb1; 0.12&#xb0;), AnkleFlexR (3.11&#xb0; &#xb1; 0.06&#xb0;), AnkleInvR (4.01&#xb0; &#xb1; 0.12&#xb0;), HipFlexL (3.19&#xb0; &#xb1; 0.06&#xb0;), HipAddL (2.42&#xb0; &#xb1; 0.13&#xb0;), HipRotL (2.25&#xb0; &#xb1; 0.22&#xb0;), KneeFlexL (4.62&#xb0; &#xb1; 0.14&#xb0;), AnkleFlexL (3.79&#xb0; &#xb1; 0.09&#xb0;), and AnkleInvL (4.06&#xb0; &#xb1; 0.18&#xb0;). These results indicate that OrientationNN provides clinically meaningful accuracy (RMSE <inline-formula id="inf49">
<mml:math id="m56">
<mml:mrow>
<mml:mo>&#x3c;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 5&#xb0;) for all lower limb joints.</p>
</sec>
<sec id="s3-2">
<label>3.2</label>
<title>Joint angle profiles</title>
<p>
<xref ref-type="fig" rid="F4">Figure 4</xref> illustrates the estimated lower limb joint angles over a complete gait cycle (0%&#x2013;100%) for 12 kinematic variables, including hip, knee, and ankle joints on both right and left limbs. Ground-truth trajectories obtained from optical motion capture are shown for reference (Baseline, cyan).</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Joint angle estimation results over the whole gait cycle. Shaded regions represent the across-subject standard deviation at each normalized time point, illustrating the variability of joint-angle estimates across participants. In the time-normalized gait cycle (0%&#x2013;100%), heel strike corresponds to 0% and 100%, while toe-off occurs around 50%&#x2013;60%.</p>
</caption>
<graphic xlink:href="fbioe-13-1737916-g004.tif">
<alt-text content-type="machine-generated">Graphs depict joint angles during the gait cycle for the right and left sides. Each panel compares Baseline, OrientationNN, and Opensense data. Plots include HipFlex, HipAdd, HipRot, KneeFlex, AnkleFlex, and AnkleInv angles over a 0 to 100 percent gait cycle. Different colored lines represent each method with shaded areas, indicating variability.</alt-text>
</graphic>
</fig>
<p>Across all twelve kinematic variables, the error distributions differed between the OrientationNN and OpenSense. For OrientationNN, small deviations from the baseline were mainly observed around heel strike (0%&#x2013;10%) and toe-off (50%&#x2013;60%), corresponding to rapid transitions in segment motion. Most local fluctuations were mild (<inline-formula id="inf50">
<mml:math id="m57">
<mml:mrow>
<mml:mo>&#x3c;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>3&#xb0;) and symmetrically distributed across both limbs. For OpenSense, larger deviations occurred primarily during mid-swing (70%&#x2013;90%) in hip flexion and knee flexion, and near heel strike in ankle dorsiflexion and inversion. The errors reached up to 8&#xb0;&#x2013;10&#xb0;, showing clear phase-dependent drift.</p>
</sec>
<sec id="s3-3">
<label>3.3</label>
<title>Model efficiency analysis</title>
<p>The bubble chart (<xref ref-type="fig" rid="F5">Figure 5</xref>) illustrates the trade-off between computational complexity (FLOPs) and prediction error (RMSE) for different neural network models. The bubble size represents the model parameter size (in KB). The proposed OrientationNN achieved an RMSE of 3.13<inline-formula id="inf51">
<mml:math id="m58">
<mml:mrow>
<mml:mo>&#xb0;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> with only <inline-formula id="inf52">
<mml:math id="m59">
<mml:mrow>
<mml:mn>4.9</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:mn>1</mml:mn>
<mml:msup>
<mml:mrow>
<mml:mn>0</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> FLOPs and a model size of 10.8 KB, indicating the lowest computational cost among all models. In contrast, the Transformer achieved the smallest error (2.61<inline-formula id="inf53">
<mml:math id="m60">
<mml:mrow>
<mml:mo>&#xb0;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>) but required <inline-formula id="inf54">
<mml:math id="m61">
<mml:mrow>
<mml:mn>2.1</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:mn>1</mml:mn>
<mml:msup>
<mml:mrow>
<mml:mn>0</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>6</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> FLOPs and 609&#xa0;KB of parameters. The LSTM model reached an RMSE of 2.67<inline-formula id="inf55">
<mml:math id="m62">
<mml:mrow>
<mml:mo>&#xb0;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> with <inline-formula id="inf56">
<mml:math id="m63">
<mml:mrow>
<mml:mn>1.96</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:mn>1</mml:mn>
<mml:msup>
<mml:mrow>
<mml:mn>0</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>5</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> FLOPs and 392 KB, while MLP and CNN produced errors of 2.95<inline-formula id="inf57">
<mml:math id="m64">
<mml:mrow>
<mml:mo>&#xb0;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> and 2.97<inline-formula id="inf58">
<mml:math id="m65">
<mml:mrow>
<mml:mo>&#xb0;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>, respectively, at higher or comparable complexity levels.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Computational efficiency of baseline models and OrientationNN.</p>
</caption>
<graphic xlink:href="fbioe-13-1737916-g005.tif">
<alt-text content-type="machine-generated">Bubble chart depicting RMSE (degrees) against calculation complexity (FLOPs) for different neural network models: MLP, LSTM, CNN, Transformer, OrientationNN, and 100KB. Larger bubbles represent higher complexity.</alt-text>
</graphic>
</fig>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<label>4</label>
<title>Discussion</title>
<p>This work highlights a significant advancement in IMU-based joint angle estimation, delivering three main contributions. First, we proposed OrientationNN, a lightweight AI model which integrates physics information with tiny MLP, achieving clinical significance. Our results indicate its superiority over the physics model-based solution. Second, we revealed the error distribution of both proposed AI model and OpenSense model, providing insight into model design focus over the whole gait cycle. Finally, by introducing physics information into the AI model, we significantly reduced the model reliance on computational resources, enabling more cost-effective applications.</p>
<sec id="s4-1">
<label>4.1</label>
<title>OrientationNN vs. OpenSense</title>
<p>Compared with the traditional physics-based OpenSense approach, OrientationNN demonstrated superior accuracy and stability across all joint angle channels. The experimental results showed that the average RMSEs of OrientationNN were below 5&#xb0; for all 12 gait kinematic channels, significantly lower than those of OpenSense (5&#xb0;&#x2013;10&#xb0;) with statistical significance (p <inline-formula id="inf59">
<mml:math id="m66">
<mml:mrow>
<mml:mo>&#x3c;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> 0.001). This improvement primarily stems from the integration of orientation-based physical information into the neural network, allowing the model to capture intrinsic joint dynamics rather than relying solely on data-driven feature mapping. Clinically, achieving an RMSE below 5&#xb0; indicates that OrientationNN meets the accuracy requirements for rehabilitation monitoring and gait assessment (<xref ref-type="bibr" rid="B34">McGinley et al., 2009</xref>).</p>
<p>From an algorithmic perspective, OpenSense relies on inverse kinematics optimization using inertial sensor signals, whose performance is easily affected by sensor drift, noise accumulation, and misalignment errors (<xref ref-type="bibr" rid="B33">McConnochie et al., 2025</xref>). In contrast, OrientationNN directly learns the mapping between IMU orientations and joint rotations in an end-to-end manner, effectively mitigating cumulative errors introduced by optimization. Furthermore, the introduction of the weighted Euler-angle loss enhances the model&#x2019;s sensitivity to biomechanically critical directions (<xref ref-type="bibr" rid="B31">Liu and Popovi&#x107;, 2002</xref>), such as flexion/extension and rotation.</p>
</sec>
<sec id="s4-2">
<label>4.2</label>
<title>Joint kinematics profiles</title>
<p>The gait cycle analysis further revealed that OrientationNN achieved superior phase responsiveness in kinematic estimation. When compared with ground-truth trajectories obtained from optical motion capture, the predicted joint angle curves from OrientationNN exhibited high consistency across the entire gait cycle, with minimal phase lag in key movements such as hip flexion, abduction, and rotation. Conversely, OpenSense showed substantial deviations and fluctuations, particularly in hip rotation and ankle inversion during highly dynamic phases (<xref ref-type="bibr" rid="B33">McConnochie et al., 2025</xref>; <xref ref-type="bibr" rid="B49">Suvorkin et al., 2024</xref>).</p>
<p>In terms of error distribution, OrientationNN&#x2019;s deviations were mainly concentrated in transition phases (0%&#x2013;10% heel strike, 50%&#x2013;60% toe-off), which are characterized by rapid angular acceleration and high inertial variability (<xref ref-type="bibr" rid="B9">Burnfield, 2010</xref>). Even in these challenging segments, local errors remained acceptable, and the distribution was symmetric between limbs, indicating robust inter-limb consistency. In contrast, OpenSense exhibited larger phase-dependent drift, with errors up to 8&#xb0;&#x2013;10&#xb0; during mid-swing (70%&#x2013;90%) for hip and knee flexion.</p>
<p>These results demonstrate that OrientationNN not only improves overall accuracy but also enhances the smoothness and physiological plausibility of the estimated trajectories. Such stability is crucial for clinical applications including gait abnormality detection and neurorehabilitation evaluation (<xref ref-type="bibr" rid="B37">Patel et al., 2012</xref>), where reliable and continuous kinematic signals are required for real-time feedback and control.</p>
</sec>
<sec id="s4-3">
<label>4.3</label>
<title>Model efficiency analysis</title>
<p>The efficiency analysis highlights the strong trade-off achieved by OrientationNN between computational complexity and predictive performance. The proposed model achieved an average RMSE of <inline-formula id="inf60">
<mml:math id="m67">
<mml:mrow>
<mml:mn>3.13</mml:mn>
<mml:mo>&#xb0;</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> with only <inline-formula id="inf61">
<mml:math id="m68">
<mml:mrow>
<mml:mn>4.9</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mn>10</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> FLOPs and a model size of 10.8 KB, which is substantially lower than other neural architectures. Although the Transformer achieved slightly lower error <inline-formula id="inf62">
<mml:math id="m69">
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mn>2.61</mml:mn>
<mml:mo>&#xb0;</mml:mo>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>, it required more than two orders of magnitude more computation (<inline-formula id="inf63">
<mml:math id="m70">
<mml:mrow>
<mml:mn>2.1</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mn>10</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>6</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> FLOPs) and memory resources, making it impractical for real-time deployment on edge devices (<xref ref-type="bibr" rid="B18">Fedus et al., 2022</xref>).</p>
<p>The lightweight advantage of OrientationNN arises from its modular design: each joint is modeled by a compact MLP subnetwork combined with learnable static rotation matrices, preserving biomechanical interpretability while minimizing parameter redundancy (<xref ref-type="bibr" rid="B20">Han et al., 2015</xref>). Moreover, the dynamic rotation compensation module further refines non-linear rotational behavior without significantly increasing computational cost. These features make OrientationNN particularly suitable for deployment on wearable devices, rehabilitation robots (<xref ref-type="bibr" rid="B17">Dollar and Herr, 2008</xref>), and prosthetic systems, where low power consumption and real-time feedback are critical.</p>
</sec>
<sec id="s4-4">
<label>4.4</label>
<title>Limitation and future work</title>
<p>Although OrientationNN achieved strong performance in both accuracy and computational efficiency, several limitations remain. First, this study was conducted using treadmill walking data from healthy adults under controlled laboratory conditions. The model&#x2019;s robustness under more complex scenarios, such as outdoor walking, uneven terrain (<xref ref-type="bibr" rid="B19">Hamacher et al., 2011</xref>), or sensor displacement, has yet to be validated (<xref ref-type="bibr" rid="B40">Prisco et al., 2025</xref>). Moreover, the current framework focuses solely on joint kinematics estimation, without explicitly modelling underlying joint dynamics such as torques or interaction forces (<xref ref-type="bibr" rid="B52">Thelen and Anderson, 2006</xref>). Although OrientationNN reduces dependence on explicit calibration by learning static rotation matrices, a minimal sensor-to-segment calibration is still required for real-time deployment, and these orientations cannot be entirely pre-trained. In addition, although the controlled treadmill setup allows reliable baseline evaluation, real-world variability such as sensor noise, placement changes, and environmental disturbances may affect model performance. Future work will therefore focus on validating the proposed method under more diverse conditions, including data augmentation and real-world walking scenarios, before extending it to pathological or rehabilitation applications.</p>
<p>Future work will focus on several significant directions. First, we plan to extend the current framework from kinematic estimation to dynamic modelling (<xref ref-type="bibr" rid="B45">Schwartz et al., 2008</xref>), enabling the prediction of joint moments and interaction forces directly from IMU data. Second, we aim to deploy and validate OrientationNN on real embedded and edge devices, assessing its real-time performance, energy efficiency, and latency in practical scenarios such as wearable gait monitoring and robotic assistance (<xref ref-type="bibr" rid="B50">Sze et al., 2017</xref>). Moreover, the framework can be integrated with more powerful machine learning models to achieve superior prediction accuracy over current baseline AI models.</p>
</sec>
</sec>
<sec sec-type="conclusion" id="s5">
<label>5</label>
<title>Conclusion</title>
<p>This study presented OrientationNN, a lightweight and physics-informed neural network framework for IMU-based lower limb joint kinematics estimation. By embedding orientation-based physical constraints within a compact MLP architecture, the proposed model achieved both high estimation accuracy and biomechanical interpretability. Experimental results demonstrated that OrientationNN outperformed the traditional physics-based OpenSense method, achieving average RMSEs below 5&#xb0; across twelve gait kinematic channels, thus meeting clinical relevance for gait assessment (<xref ref-type="bibr" rid="B4">Armand et al., 2016</xref>). Additionally, OrientationNN achieved this performance with substantially reduced computational complexity and parameter size, making it well suited for deployment on edge and wearable devices (<xref ref-type="bibr" rid="B51">Taylor et al., 2017</xref>).</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="author-contributions" id="s7">
<title>Author contributions</title>
<p>QB: Conceptualization, Data curation, Formal Analysis, Investigation, Methodology, Software, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review and editing. HW: Conceptualization, Formal Analysis, Investigation, Validation, Writing &#x2013; review and editing. KA: Investigation, Project administration, Resources, Supervision, Writing &#x2013; review and editing. ZD: Funding acquisition, Project administration, Resources, Supervision, Writing &#x2013; review and editing.</p>
</sec>
<sec sec-type="COI-statement" id="s9">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s10">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was used in the creation of this manuscript to polish the language.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s11">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<fn-group>
<fn fn-type="custom" custom-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1506794/overview">Filipa Jo&#xe3;o</ext-link>, University of Lisbon, Portugal</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3285235/overview">Munho Ryu</ext-link>, Jeonbuk National University, Republic of Korea</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3299042/overview">Diogo Ricardo</ext-link>, Instituto Polit&#xe9;cnico de Lisboa, Portugal</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B2">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Akiba</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Sano</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Yanase</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Ohta</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Koyama</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>Optuna: a next-generation hyperparameter optimization framework</article-title>,&#x201d; in <source>Proceedings of the 25th ACM SIGKDD international conference on knowledge discovery and data mining</source>, <fpage>2623</fpage>&#x2013;<lpage>2631</lpage>.</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Al-Amri</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Nicholas</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Button</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Sparkes</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Sheeran</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Davies</surname>
<given-names>J. L.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Inertial measurement units for clinical movement analysis: reliability and concurrent validity</article-title>. <source>Sensors</source> <volume>18</volume>, <fpage>719</fpage>. <pub-id pub-id-type="doi">10.3390/s18030719</pub-id>
<pub-id pub-id-type="pmid">29495600</pub-id>
</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Armand</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Decoulon</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Bonnefoy-Mazure</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Gait analysis in children with cerebral palsy</article-title>. <source>EFORT Open Reviews</source> <volume>1</volume>, <fpage>448</fpage>&#x2013;<lpage>460</lpage>. <pub-id pub-id-type="doi">10.1302/2058-5241.1.000052</pub-id>
<pub-id pub-id-type="pmid">28698802</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bailey</surname>
<given-names>C. A.</given-names>
</name>
<name>
<surname>Uchida</surname>
<given-names>T. K.</given-names>
</name>
<name>
<surname>Nantel</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Graham</surname>
<given-names>R. B.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Validity and sensitivity of an inertial measurement unit-driven biomechanical model of motor variability for gait</article-title>. <source>Sensors</source> <volume>21</volume>, <fpage>7690</fpage>. <pub-id pub-id-type="doi">10.3390/s21227690</pub-id>
<pub-id pub-id-type="pmid">34833766</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Baker</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>Gait analysis methods in rehabilitation</article-title>. <source>J. Neuroengineering Rehabilitation</source> <volume>3</volume>, <fpage>4</fpage>. <pub-id pub-id-type="doi">10.1186/1743-0003-3-4</pub-id>
<pub-id pub-id-type="pmid">16512912</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Berner</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Cockcroft</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Morris</surname>
<given-names>L. D.</given-names>
</name>
<name>
<surname>Louw</surname>
<given-names>Q.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Concurrent validity and within-session reliability of gait kinematics measured using an inertial motion capture system with repeated calibration</article-title>. <source>J. Bodyw. Mov. Ther.</source> <volume>24</volume>, <fpage>251</fpage>&#x2013;<lpage>260</lpage>. <pub-id pub-id-type="doi">10.1016/j.jbmt.2020.06.008</pub-id>
<pub-id pub-id-type="pmid">33218520</pub-id>
</mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bian</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Castellani</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Shepherd</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Duan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Ding</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Gait intention prediction using a lower-limb musculoskeletal model and long short-term memory neural networks</article-title>. <source>IEEE Trans. Neural Syst. Rehabilitation Eng.</source> <volume>32</volume>, <fpage>822</fpage>&#x2013;<lpage>830</lpage>. <pub-id pub-id-type="doi">10.1109/TNSRE.2024.3365201</pub-id>
<pub-id pub-id-type="pmid">38345960</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Burnfield</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Gait analysis: normal and pathological function</article-title>. <source>J. Sports Sci. Med.</source> <volume>9</volume>, <fpage>353</fpage>.</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cappozzo</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Della Croce</surname>
<given-names>U.</given-names>
</name>
<name>
<surname>Leardini</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Chiari</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2005</year>). <article-title>Human movement analysis using stereophotogrammetry: part 1: theoretical background</article-title>. <source>Gait and Posture</source> <volume>21</volume>, <fpage>186</fpage>&#x2013;<lpage>196</lpage>. <pub-id pub-id-type="doi">10.1016/j.gaitpost.2004.01.010</pub-id>
<pub-id pub-id-type="pmid">15639398</pub-id>
</mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Zhuang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Nie</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Xiao</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Learning a 3d human pose distance metric from geometric pose descriptor</article-title>. <source>IEEE Trans. Vis. Comput. Graph.</source> <volume>17</volume>, <fpage>1676</fpage>&#x2013;<lpage>1689</lpage>. <pub-id pub-id-type="doi">10.1109/TVCG.2010.272</pub-id>
<pub-id pub-id-type="pmid">21173458</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Cheng</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Xi</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Surface emg based continuous estimation of human lower limb joint angles by using deep belief networks</article-title>. <source>Biomed. Signal Process. Control</source> <volume>40</volume>, <fpage>335</fpage>&#x2013;<lpage>342</lpage>. <pub-id pub-id-type="doi">10.1016/j.bspc.2017.10.002</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cooper</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Sheret</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>McMillian</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Siliverdis</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Sha</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Hodgins</surname>
<given-names>D.</given-names>
</name>
<etal/>
</person-group> (<year>2009</year>). <article-title>Inertial sensor-based knee flexion/extension angle estimation</article-title>. <source>J. Biomechanics</source> <volume>42</volume>, <fpage>2678</fpage>&#x2013;<lpage>2685</lpage>. <pub-id pub-id-type="doi">10.1016/j.jbiomech.2009.08.004</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cuomo</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Di Cola</surname>
<given-names>V. S.</given-names>
</name>
<name>
<surname>Giampaolo</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Rozza</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Raissi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Piccialli</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Scientific machine learning through physics&#x2013;informed neural networks: where we are and what&#x2019;s next</article-title>. <source>J. Sci. Comput.</source> <volume>92</volume>, <fpage>88</fpage>. <pub-id pub-id-type="doi">10.1007/s10915-022-01939-z</pub-id>
</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cutti</surname>
<given-names>A. G.</given-names>
</name>
<name>
<surname>Ferrari</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Garofalo</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Raggi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Cappello</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ferrari</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>&#x2018;outwalk&#x2019;: a protocol for clinical gait analysis based on inertial and magnetic sensors</article-title>. <source>Med. and Biological Engineering and Computing</source> <volume>48</volume>, <fpage>17</fpage>&#x2013;<lpage>25</lpage>. <pub-id pub-id-type="doi">10.1007/s11517-009-0545-x</pub-id>
<pub-id pub-id-type="pmid">19911214</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Delp</surname>
<given-names>S. L.</given-names>
</name>
<name>
<surname>Anderson</surname>
<given-names>F. C.</given-names>
</name>
<name>
<surname>Arnold</surname>
<given-names>A. S.</given-names>
</name>
<name>
<surname>Loan</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Habib</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>John</surname>
<given-names>C. T.</given-names>
</name>
<etal/>
</person-group> (<year>2007</year>). <article-title>Opensim: open-source software to create and analyze dynamic simulations of movement</article-title>. <source>IEEE Transactions Biomedical Engineering</source> <volume>54</volume>, <fpage>1940</fpage>&#x2013;<lpage>1950</lpage>. <pub-id pub-id-type="doi">10.1109/TBME.2007.901024</pub-id>
<pub-id pub-id-type="pmid">18018689</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dollar</surname>
<given-names>A. M.</given-names>
</name>
<name>
<surname>Herr</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Lower extremity exoskeletons and active orthoses: challenges and state-of-the-art</article-title>. <source>IEEE Trans. Robotics</source> <volume>24</volume>, <fpage>144</fpage>&#x2013;<lpage>158</lpage>. <pub-id pub-id-type="doi">10.1109/tro.2008.915453</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fedus</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Zoph</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Shazeer</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Switch transformers: scaling to trillion parameter models with simple and efficient sparsity</article-title>. <source>J. Mach. Learn. Res.</source> <volume>23</volume>, <fpage>1</fpage>&#x2013;<lpage>39</lpage>.</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hamacher</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Singh</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Van Die&#xeb;n</surname>
<given-names>J. H.</given-names>
</name>
<name>
<surname>Heller</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Taylor</surname>
<given-names>W. R.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Kinematic measures for assessing gait stability in elderly individuals: a systematic review</article-title>. <source>J. R. Soc. Interface</source> <volume>8</volume>, <fpage>1682</fpage>&#x2013;<lpage>1698</lpage>. <pub-id pub-id-type="doi">10.1098/rsif.2011.0416</pub-id>
<pub-id pub-id-type="pmid">21880615</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Han</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Pool</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Tran</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Dally</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Learning both weights and connections for efficient neural network</article-title>. <source>Adv. Neural Information Processing Systems</source> <volume>28</volume>.</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Holzbaur</surname>
<given-names>K. R.</given-names>
</name>
<name>
<surname>Murray</surname>
<given-names>W. M.</given-names>
</name>
<name>
<surname>Delp</surname>
<given-names>S. L.</given-names>
</name>
</person-group> (<year>2005</year>). <article-title>A model of the upper extremity for simulating musculoskeletal surgery and analyzing neuromuscular control</article-title>. <source>Ann. Biomedical Engineering</source> <volume>33</volume>, <fpage>829</fpage>&#x2013;<lpage>840</lpage>. <pub-id pub-id-type="doi">10.1007/s10439-005-3320-7</pub-id>
<pub-id pub-id-type="pmid">16078622</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Huang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Kaufmann</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Aksan</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Black</surname>
<given-names>M. J.</given-names>
</name>
<name>
<surname>Hilliges</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Pons-Moll</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Deep inertial poser: learning to reconstruct human pose from sparse inertial measurements in real time</article-title>. <source>ACM Trans. Graph. (TOG)</source> <volume>37</volume>, <fpage>1</fpage>&#x2013;<lpage>15</lpage>. <pub-id pub-id-type="doi">10.1145/3272127.3275108</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Karniadakis</surname>
<given-names>G. E.</given-names>
</name>
<name>
<surname>Kevrekidis</surname>
<given-names>I. G.</given-names>
</name>
<name>
<surname>Lu</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Perdikaris</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Physics-informed machine learning</article-title>. <source>Nat. Rev. Phys.</source> <volume>3</volume>, <fpage>422</fpage>&#x2013;<lpage>440</lpage>. <pub-id pub-id-type="doi">10.1038/s42254-021-00314-5</pub-id>
</mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Kawamoto</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kanbe</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Sankai</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2003</year>). &#x201c;<article-title>Power assist method for hal-3 using emg-based feedback controller</article-title>,&#x201d; in <source>SMC&#x2019;03 conference proceedings. 2003 IEEE international conference on systems, man and cybernetics. Conference theme - system security and assurance (Cat. No.03CH37483)</source>, <volume>2</volume>, <fpage>1648</fpage>&#x2013;<lpage>1653</lpage>. <pub-id pub-id-type="doi">10.1109/ICSMC.2003.1244649</pub-id>
</mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Khant</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Gouwanda</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Gopalai</surname>
<given-names>A. A.</given-names>
</name>
<name>
<surname>Lim</surname>
<given-names>K. H.</given-names>
</name>
<name>
<surname>Foong</surname>
<given-names>C. C.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Estimation of lower extremity muscle activity in gait using the wearable inertial measurement units and neural network</article-title>. <source>Sensors</source> <volume>23</volume>, <fpage>556</fpage>. <pub-id pub-id-type="doi">10.3390/s23010556</pub-id>
<pub-id pub-id-type="pmid">36617154</pub-id>
</mixed-citation>
</ref>
<ref id="B1">
<mixed-citation publication-type="web">
<person-group person-group-type="author">
<name>
<surname>Kingma</surname>
<given-names>D. P.</given-names>
</name>
<name>
<surname>Ba</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Adam: A Method for Stochastic Optimization</article-title>. <source>arXiv preprint arXiv:</source> <fpage>1412.6980</fpage>.</mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kok</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Hol</surname>
<given-names>J. D.</given-names>
</name>
<name>
<surname>Sch&#xf6;n</surname>
<given-names>T. B.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Using inertial sensors for position and orientation estimation</article-title>, <volume>11</volume>, <fpage>1</fpage>&#x2013;<lpage>153</lpage>. <pub-id pub-id-type="doi">10.1561/2000000094</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kwon</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Tong</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Haresamudram</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Gao</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Abowd</surname>
<given-names>G. D.</given-names>
</name>
<name>
<surname>Lane</surname>
<given-names>N. D.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Imutube: automatic extraction of virtual on-body accelerometry from video for human activity recognition</article-title>. <source>Proc. ACM Interact. Mob. Wearable Ubiquitous Technol.</source> <volume>4</volume>, <fpage>1</fpage>&#x2013;<lpage>29</lpage>. <pub-id pub-id-type="doi">10.1145/3411841</pub-id>
<pub-id pub-id-type="pmid">35846237</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Lane</surname>
<given-names>N. D.</given-names>
</name>
<name>
<surname>Bhattacharya</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Georgiev</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Forlivesi</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Kawsar</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2015</year>). &#x201c;<article-title>An early resource characterization of deep learning on wearables, smartphones and internet-of-things devices</article-title>,&#x201d; in <source>Proceedings of the 2015 international workshop on internet of things towards applications</source>, <fpage>7</fpage>&#x2013;<lpage>12</lpage>.</mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Lea</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Flynn</surname>
<given-names>M. D.</given-names>
</name>
<name>
<surname>Vidal</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Reiter</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Hager</surname>
<given-names>G. D.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Temporal convolutional networks for action segmentation and detection</article-title>,&#x201d; in <source>
<italic>Proceedings of the IEEE conference on computer vision and pattern recognition</italic>
</source>, <fpage>156</fpage>&#x2013;<lpage>165</lpage>.</mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Linka</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Hillg&#xe4;rtner</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Abdolazizi</surname>
<given-names>K. P.</given-names>
</name>
<name>
<surname>Aydin</surname>
<given-names>R. C.</given-names>
</name>
<name>
<surname>Itskov</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Cyron</surname>
<given-names>C. J.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Constitutive artificial neural networks: a fast and general approach to predictive data-driven constitutive modeling by deep learning</article-title>. <source>J. Comput. Phys.</source> <volume>429</volume>, <fpage>110010</fpage>. <pub-id pub-id-type="doi">10.1016/j.jcp.2020.110010</pub-id>
</mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>C. K.</given-names>
</name>
<name>
<surname>Popovi&#x107;</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2002</year>). <article-title>Synthesis of complex dynamic character motion from simple animations</article-title>. <source>ACM Trans. Graph. (TOG)</source> <volume>21</volume>, <fpage>408</fpage>&#x2013;<lpage>416</lpage>. <pub-id pub-id-type="doi">10.1145/566570.566596</pub-id>
</mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ma</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Bian</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>J. M.</given-names>
</name>
<name>
<surname>Alsayed</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Ding</surname>
<given-names>Z.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Measuring lower-limb kinematics in walking: wearable sensors achieve comparable reliability to motion capture systems and smartphone cameras</article-title>. <source>Sensors</source> <volume>25</volume>, <fpage>2899</fpage>. <pub-id pub-id-type="doi">10.3390/s25092899</pub-id>
<pub-id pub-id-type="pmid">40363335</pub-id>
</mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>McConnochie</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Fox</surname>
<given-names>A. S.</given-names>
</name>
<name>
<surname>Bellenger</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Thewlis</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2025</year>). <article-title>Optimal control simulations tracking wearable sensor signals provide comparable running gait kinematics to marker-based motion capture</article-title>. <source>PeerJ</source> <volume>13</volume>, <fpage>e19035</fpage>. <pub-id pub-id-type="doi">10.7717/peerj.19035</pub-id>
<pub-id pub-id-type="pmid">40061227</pub-id>
</mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>McGinley</surname>
<given-names>J. L.</given-names>
</name>
<name>
<surname>Baker</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Wolfe</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Morris</surname>
<given-names>M. E.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>The reliability of three-dimensional kinematic gait measurements: a systematic review</article-title>. <source>Gait and Posture</source> <volume>29</volume>, <fpage>360</fpage>&#x2013;<lpage>369</lpage>. <pub-id pub-id-type="doi">10.1016/j.gaitpost.2008.09.003</pub-id>
<pub-id pub-id-type="pmid">19013070</pub-id>
</mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mundt</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Koeppe</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>David</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Witter</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Bamer</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Potthast</surname>
<given-names>W.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Estimation of gait mechanics based on simulated and measured imu data using an artificial neural network</article-title>. <source>Front. Bioeng. Biotechnol.</source> <volume>8</volume>, <fpage>41</fpage>. <pub-id pub-id-type="doi">10.3389/fbioe.2020.00041</pub-id>
<pub-id pub-id-type="pmid">32117923</pub-id>
</mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>N&#xfc;esch</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Roos</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Pagenstert</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>M&#xfc;ndermann</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Measuring joint kinematics of treadmill walking and running: comparison between an inertial sensor based system and a camera-based system</article-title>. <source>J. Biomechanics</source> <volume>57</volume>, <fpage>32</fpage>&#x2013;<lpage>38</lpage>. <pub-id pub-id-type="doi">10.1016/j.jbiomech.2017.03.015</pub-id>
<pub-id pub-id-type="pmid">28366438</pub-id>
</mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Patel</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Park</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Bonato</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Chan</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Rodgers</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>A review of wearable sensors and systems with application in rehabilitation</article-title>. <source>J. Neuroengineering Rehabilitation</source> <volume>9</volume>, <fpage>21</fpage>. <pub-id pub-id-type="doi">10.1186/1743-0003-9-21</pub-id>
<pub-id pub-id-type="pmid">22520559</pub-id>
</mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Peters</surname>
<given-names>D. M.</given-names>
</name>
<name>
<surname>O&#x2019;Brien</surname>
<given-names>E. S.</given-names>
</name>
<name>
<surname>Kamrud</surname>
<given-names>K. E.</given-names>
</name>
<name>
<surname>Roberts</surname>
<given-names>S. M.</given-names>
</name>
<name>
<surname>Rooney</surname>
<given-names>T. A.</given-names>
</name>
<name>
<surname>Thibodeau</surname>
<given-names>K. P.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>Utilization of wearable technology to assess gait and mobility post-stroke: a systematic review</article-title>. <source>J. Neuroengineering Rehabilitation</source> <volume>18</volume>, <fpage>67</fpage>. <pub-id pub-id-type="doi">10.1186/s12984-021-00863-x</pub-id>
</mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Phinyomark</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Khushaba</surname>
<given-names>R. N.</given-names>
</name>
<name>
<surname>Scheme</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Feature extraction and selection for myoelectric control based on wearable emg sensors</article-title>. <source>Sensors</source> <volume>18</volume>, <fpage>1615</fpage>. <pub-id pub-id-type="doi">10.3390/s18051615</pub-id>
<pub-id pub-id-type="pmid">29783659</pub-id>
</mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Prisco</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Pirozzi</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Santone</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Esposito</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Cesarelli</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Amato</surname>
<given-names>F.</given-names>
</name>
<etal/>
</person-group> (<year>2025</year>). <article-title>Validity of wearable inertial sensors for gait analysis: a systematic review</article-title>. <source>Diagnostics</source> <volume>15</volume>, <fpage>36</fpage>. <pub-id pub-id-type="doi">10.3390/diagnostics15010036</pub-id>
<pub-id pub-id-type="pmid">39795564</pub-id>
</mixed-citation>
</ref>
<ref id="B41">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Raissi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Perdikaris</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Karniadakis</surname>
<given-names>G. E.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Physics-informed neural networks: a deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations</article-title>. <source>J. Comput. Phys.</source> <volume>378</volume>, <fpage>686</fpage>&#x2013;<lpage>707</lpage>. <pub-id pub-id-type="doi">10.1016/j.jcp.2018.10.045</pub-id>
</mixed-citation>
</ref>
<ref id="B42">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Raissi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Yazdani</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Karniadakis</surname>
<given-names>G. E.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Hidden fluid mechanics: learning velocity and pressure fields from flow visualizations</article-title>. <source>Science</source> <volume>367</volume>, <fpage>1026</fpage>&#x2013;<lpage>1030</lpage>. <pub-id pub-id-type="doi">10.1126/science.aaw4741</pub-id>
<pub-id pub-id-type="pmid">32001523</pub-id>
</mixed-citation>
</ref>
<ref id="B43">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Reddi</surname>
<given-names>V. J.</given-names>
</name>
<name>
<surname>Cheng</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Kanter</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Mattson</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Schmuelling</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>C.-J.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). &#x201c;<article-title>Mlperf inference benchmark</article-title>,&#x201d; in <source>2020 ACM/IEEE 47th annual international symposium on computer Architecture (ISCA)</source> (<publisher-name>IEEE</publisher-name>), <fpage>446</fpage>&#x2013;<lpage>459</lpage>.</mixed-citation>
</ref>
<ref id="B44">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Roetenberg</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Luinge</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Slycke</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Xsens mvn: full 6dof human motion tracking using miniature inertial sensors</article-title>. <source>Xsens Motion Technol. BV, Tech. Rep.</source> <volume>1</volume>, <fpage>1</fpage>&#x2013;<lpage>7</lpage>.</mixed-citation>
</ref>
<ref id="B45">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Schwartz</surname>
<given-names>M. H.</given-names>
</name>
<name>
<surname>Rozumalski</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Trost</surname>
<given-names>J. P.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>The effect of walking speed on the gait of typically developing children</article-title>. <source>J. Biomechanics</source> <volume>41</volume>, <fpage>1639</fpage>&#x2013;<lpage>1650</lpage>. <pub-id pub-id-type="doi">10.1016/j.jbiomech.2008.03.015</pub-id>
<pub-id pub-id-type="pmid">18466909</pub-id>
</mixed-citation>
</ref>
<ref id="B46">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Seel</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Raisch</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Schauer</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Imu-based joint angle measurement for gait analysis</article-title>. <source>Sensors</source> <volume>14</volume>, <fpage>6891</fpage>&#x2013;<lpage>6909</lpage>. <pub-id pub-id-type="doi">10.3390/s140406891</pub-id>
<pub-id pub-id-type="pmid">24743160</pub-id>
</mixed-citation>
</ref>
<ref id="B47">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Seth</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Hicks</surname>
<given-names>J. L.</given-names>
</name>
<name>
<surname>Uchida</surname>
<given-names>T. K.</given-names>
</name>
<name>
<surname>Habib</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Dembia</surname>
<given-names>C. L.</given-names>
</name>
<name>
<surname>Dunne</surname>
<given-names>J. J.</given-names>
</name>
<etal/>
</person-group> (<year>2018</year>). <article-title>Opensim: simulating musculoskeletal dynamics and neuromuscular control to study human and animal movement</article-title>. <source>PLoS Computational Biology</source> <volume>14</volume>, <fpage>e1006223</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pcbi.1006223</pub-id>
<pub-id pub-id-type="pmid">30048444</pub-id>
</mixed-citation>
</ref>
<ref id="B48">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sol&#xe0;</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Deray</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Atchuthan</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>A micro lie theory for state estimation in robotics</article-title>. <source>arXiv Preprint arXiv:1812.01537</source>.</mixed-citation>
</ref>
<ref id="B49">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Suvorkin</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Garcia-Fernandez</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Gonz&#xe1;lez-Casado</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Rovira-Garcia</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Assessment of noise of mems imu sensors of different grades for gnss/imu navigation</article-title>. <source>Sensors</source> <volume>24</volume>, <fpage>1953</fpage>. <pub-id pub-id-type="doi">10.3390/s24061953</pub-id>
<pub-id pub-id-type="pmid">38544217</pub-id>
</mixed-citation>
</ref>
<ref id="B50">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sze</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Y.-H.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>T.-J.</given-names>
</name>
<name>
<surname>Emer</surname>
<given-names>J. S.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Efficient processing of deep neural networks: a tutorial and survey</article-title>. <source>Proc. IEEE</source> <volume>105</volume>, <fpage>2295</fpage>&#x2013;<lpage>2329</lpage>. <pub-id pub-id-type="doi">10.1109/jproc.2017.2761740</pub-id>
</mixed-citation>
</ref>
<ref id="B51">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Taylor</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Miller</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Kaufman</surname>
<given-names>K. R.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Static and dynamic validation of inertial measurement units</article-title>. <source>Gait and Posture</source> <volume>57</volume>, <fpage>80</fpage>&#x2013;<lpage>84</lpage>. <pub-id pub-id-type="doi">10.1016/j.gaitpost.2017.05.026</pub-id>
<pub-id pub-id-type="pmid">28578138</pub-id>
</mixed-citation>
</ref>
<ref id="B52">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Thelen</surname>
<given-names>D. G.</given-names>
</name>
<name>
<surname>Anderson</surname>
<given-names>F. C.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>Using computed muscle control to generate forward dynamic simulations of human walking from experimental data</article-title>. <source>J. Biomechanics</source> <volume>39</volume>, <fpage>1107</fpage>&#x2013;<lpage>1115</lpage>. <pub-id pub-id-type="doi">10.1016/j.jbiomech.2005.02.010</pub-id>
<pub-id pub-id-type="pmid">16023125</pub-id>
</mixed-citation>
</ref>
<ref id="B53">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Tucker</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Cheng</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Novoseller</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Cheng</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Yue</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Burdick</surname>
<given-names>J. W.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). &#x201c;<article-title>Human preference-based learning for high-dimensional optimization of exoskeleton walking gaits</article-title>,&#x201d; in <source>Proceedings of the IEEE/RSJ international conference on intelligent robots and systems</source>, <fpage>3423</fpage>&#x2013;<lpage>3430</lpage>.</mixed-citation>
</ref>
<ref id="B54">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Zou</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>You</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Luo</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>A novel image classification approach <italic>via</italic> dense-mobilenet models</article-title>. <source>Mob. Inf. Syst.</source> <volume>2020</volume>, <fpage>7602384</fpage>&#x2013;<lpage>7602388</lpage>. <pub-id pub-id-type="doi">10.1155/2020/7602384</pub-id>
</mixed-citation>
</ref>
<ref id="B55">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Westenbroek</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Castaneda</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Agrawal</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Sastry</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Sreenath</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Lyapunov design for robust and efficient robotic reinforcement learning</article-title>. <source>arXiv Preprint arXiv:2208.06721</source>.</mixed-citation>
</ref>
<ref id="B56">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Yi</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Habermann</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Shimada</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Golyanik</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Theobalt</surname>
<given-names>C.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). &#x201c;<article-title>Physical inertial poser (pip): physics-aware real-time human motion tracking from sparse inertial sensors</article-title>,&#x201d; in <source>Proceedings of the IEEE/CVF conference on computer vision and pattern recognition</source>, <fpage>13167</fpage>&#x2013;<lpage>13178</lpage>.</mixed-citation>
</ref>
<ref id="B57">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Dao</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Karniadakis</surname>
<given-names>G. E.</given-names>
</name>
<name>
<surname>Suresh</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Analyses of internal structures and defects in materials using physics-informed neural networks</article-title>. <source>Sci. Advances</source> <volume>8</volume>, <fpage>eabk0644</fpage>. <pub-id pub-id-type="doi">10.1126/sciadv.abk0644</pub-id>
<pub-id pub-id-type="pmid">35171670</pub-id>
</mixed-citation>
</ref>
</ref-list>
</back>
</article>