<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Bioeng. Biotechnol.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Bioengineering and Biotechnology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Bioeng. Biotechnol.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2296-4185</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1762919</article-id>
<article-id pub-id-type="doi">10.3389/fbioe.2026.1762919</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Optimizing wearable IMU configurations for running gait analysis: a machine learning-based sensor fusion approach</article-title>
<alt-title alt-title-type="left-running-head">Yuan et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fbioe.2026.1762919">10.3389/fbioe.2026.1762919</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Yuan</surname>
<given-names>Ye</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="author-notes" rid="fn1">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/3307644"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing - original draft</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Yu</surname>
<given-names>Yaohui</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="author-notes" rid="fn1">
<sup>&#x2020;</sup>
</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal Analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Cai</surname>
<given-names>Shanshan</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Cheng</surname>
<given-names>Weidong</given-names>
</name>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x26; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing - review and editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
</contrib>
</contrib-group>
<aff id="aff1">
<label>1</label>
<institution>College of Physical Education, Xuzhou University of Technology</institution>, <city>Xuzhou</city>, <state>Jiangsu</state>, <country country="CN">China</country>
</aff>
<aff id="aff2">
<label>2</label>
<institution>Graduate School, Tianjin University of Traditional Chinese Medicine</institution>, <city>Tianjin</city>, <country country="CN">China</country>
</aff>
<aff id="aff3">
<label>3</label>
<institution>Faculty of Health and Medicine, Lancaster University</institution>, <city>Lancaster</city>, <country country="GB">United Kingdom</country>
</aff>
<aff id="aff4">
<label>4</label>
<institution>General Education College, Cyberspace Security University of China</institution>, <city>Wuhan</city>, <country country="CN">China</country>
</aff>
<author-notes>
<corresp id="c001">
<label>&#x2a;</label>Correspondence: Weidong Cheng, <email xlink:href="mailto:z0005220@163.com">z0005220@163.com</email>
</corresp>
<fn fn-type="equal" id="fn1">
<label>
<sup>&#x2020;</sup>
</label>
<p>These authors have contributed equally to this work and share first authorship</p>
</fn>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-11">
<day>11</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>14</volume>
<elocation-id>1762919</elocation-id>
<history>
<date date-type="received">
<day>08</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>02</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>06</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2026 Yuan, Yu, Cai and Cheng.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Yuan, Yu, Cai and Cheng</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-11">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Objective</title>
<p>This study applies machine learning (ML) techniques to address the hardware limitation of dense multi-sensor wearable networks by determining the feasibility of reducing a high-dimensional 17-sensor network to a &#x201c;minimal-optimal&#x201d; subset without compromising measurement accuracy. Unlike previous studies focusing on activity classification, we systematically quantify the information redundancy in kinematic chains to optimize sensor fusion architectures.</p>
</sec>
<sec>
<title>Methods</title>
<p>Twenty-five recreational runners performed treadmill protocols at three speeds (8, 10, and 12&#xa0;km/h) while wearing a gold-standard Xsens MVN system (17 IMUs). Raw accelerometer and gyroscope signals were programmatically subsetted to simulate minimal configurations. A Random Forest (RF) regression model was selected after benchmarking against baseline Linear Regression and deep learning (LSTM) models. A comprehensive vector of time- and frequency-domain features was extracted via sliding windows, and Recursive Feature Elimination (RFE) was applied to identify the most critical signal attributes.</p>
</sec>
<sec>
<title>Results</title>
<p>Analysis revealed that a single lumbosacral IMU could successfully reconstruct global parameters (Cadence, Vertical Oscillation, Ground Contact Time) with high precision (<inline-formula id="inf1">
<mml:math id="m1">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x3e;</mml:mo>
<mml:mn>0.95</mml:mn>
<mml:mo>,</mml:mo>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>P</mml:mi>
<mml:mi>E</mml:mi>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>5</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>), outperforming standard commercial benchmarks. However, this single-node setup failed to detect gait asymmetry (<inline-formula id="inf2">
<mml:math id="m2">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.52</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>). A distributed three-sensor fusion configuration (Lumbosacral &#x2b; Bilateral Ankles) resolved this limitation, achieving results comparable to the full-body system for all parameters (<inline-formula id="inf3">
<mml:math id="m3">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x3e;</mml:mo>
<mml:mn>0.91</mml:mn>
<mml:mo>,</mml:mo>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>P</mml:mi>
<mml:mi>E</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>7.12</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>). Performance remained robust across all running speeds, with only a marginal accuracy drop at 12&#xa0;km/h.</p>
</sec>
<sec>
<title>Conclusion</title>
<p>This study validates a machine learning framework for optimizing sensor array design. The proposed three-sensor fusion offers a robust, low-cost architectural blueprint for next-generation wearable devices, proving that complex deep learning is not always required when sensor placement is biomechanically optimized.</p>
</sec>
</abstract>
<kwd-group>
<kwd>machine learning</kwd>
<kwd>random forest regression</kwd>
<kwd>running gait analysis</kwd>
<kwd>sensor fusion</kwd>
<kwd>wearable IMU</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement>
</funding-group>
<counts>
<fig-count count="5"/>
<table-count count="5"/>
<equation-count count="3"/>
<ref-count count="29"/>
<page-count count="11"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Biomechanics</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Running is one of the most popular and accessible forms of physical activity worldwide, offering significant benefits for cardiovascular health, mental wellbeing, and overall longevity (<xref ref-type="bibr" rid="B13">Lee et al., 2014</xref>). However, this high-impact, repetitive activity is also associated with a remarkably high injury risk. Epidemiological studies consistently report annual incidence rates of running-related injuries (RRIs) between 19% and 79%, depending on the population and definitions used (<xref ref-type="bibr" rid="B20">van Gent et al., 2007</xref>). These injuries, such as patellofemoral pain syndrome, Achilles tendinopathy, and medial tibial stress syndrome, not only disrupt training but can also lead to long-term functional impairment and significant medical costs (<xref ref-type="bibr" rid="B19">Taunton et al., 2002</xref>). Recently, advanced data-driven approaches have been explored to better understand injury mechanisms, such as predicting ligament fatigue failure risks using deep learning models (<xref ref-type="bibr" rid="B58">Xu et al., 2022</xref>; <xref ref-type="bibr" rid="B26">Xu et al., 2025</xref>), and evaluating the impact of sensor-axis combinations on activity recognition accuracy in clinical settings (<xref ref-type="bibr" rid="B27">Ronao and Cho, 2016</xref>).</p>
<p>Biomechanical research has identified a range of aberrant running gait parameters associated with an increased risk of RRIs. For instance, excessive vertical impact forces, high vertical oscillation (VO), prolonged ground contact time (GCT), and excessive pronation are considered key risk factors (<xref ref-type="bibr" rid="B10">Hreljac, 2004</xref>; <xref ref-type="bibr" rid="B6">Davis and Powers, 2010</xref>). Consequently, the ability to accurately and objectively assess these biomechanical parameters is crucial for developing personalized training programs, providing real-time technical feedback, and guiding rehabilitation protocols.</p>
<p>For decades, the &#x201c;gold standard&#x201d; for gait analysis has been laboratory-based 3D optical motion capture (e.g., Vicon, Qualisys) combined with embedded force plates (<xref ref-type="bibr" rid="B22">Winter, 2009</xref>). These systems provide unparalleled precision in quantifying joint kinematics and kinetics. However, their application is severely limited. They are prohibitively expensive, confined to controlled laboratory environments, and require highly specialized expertise for data collection and processing. These factors make them inaccessible to the vast majority of coaches, athletes, and recreational runners.</p>
<p>In recent years, the development of micro-electromechanical systems (MEMS) has given rise to small, low-cost, wireless inertial measurement units (IMUs). These sensors, typically containing accelerometers, gyroscopes, and magnetometers, can capture body segment orientation and movement in real-time (<xref ref-type="bibr" rid="B16">Poitras et al., 2019</xref>). This wearable technology has begun to revolutionize biomechanics, enabling long-term, continuous monitoring in ecologically valid, real-world environments. However, to obtain full-body kinematics comparable to laboratory systems, researchers often deploy a large array of IMUs (e.g., 17 sensors) across various body segments (<xref ref-type="bibr" rid="B16">Poitras et al., 2019</xref>). This &#x201c;Christmas tree&#x201d; effect, while feasible for research, presents significant practical barriers: it is still costly and complex, places a heavy time burden on the user for setup (often 15&#x2013;30&#xa0;min), and negatively impacts user comfort, which may even alter the natural gait being measured (<xref ref-type="bibr" rid="B5">Caldas et al., 2017</xref>).</p>
<p>This leads to a core dilemma in wearable sports analysis: the trade-off between convenience and accuracy. On one hand, consumer-grade wearables (e.g., smartwatches, footpods) are highly convenient but typically offer only basic metrics (like cadence or step count), lacking deep biomechanical insight. On the other, research-grade multi-sensor systems are accurate but entirely impractical for daily use. A method to bridge this gap is urgently needed. Recent systematic evaluations have underscored that the selection of a &#x201c;minimal-optimal&#x201d; sensor configuration is not merely a hardware constraint but a critical step in preserving the biomechanical integrity of high-frequency kinematic data (<xref ref-type="bibr" rid="B28">Huang et al., 2018</xref>; <xref ref-type="bibr" rid="B23">Wouda et al., 2018</xref>).</p>
<p>This study proposes that machine learning (ML) is the ideal tool to address this hardware-accuracy dilemma. From a signal processing perspective, human locomotion involves highly coordinated kinematic chains, implying significant information redundancy across different body segments. We hypothesize that data acquired from critical nodes&#x2014;specifically the Center of Mass (CoM) and end-effectors&#x2014;contain sufficient latent features to estimate the key spatio-temporal gait scalars of the system. By training supervised regression models to learn the non-linear mapping between these minimal inputs and full-system outputs, we can effectively &#x201c;virtualize&#x201d; the missing sensors. It is important to clarify that unlike full-body motion capture which reconstructs continuous 3D joint angles, this study focuses specifically on the precise regression of discrete, clinically relevant scalar metrics (Ground Contact Time, Vertical Oscillation, Cadence, and Symmetry Index).</p>
<p>Therefore, the primary objective of this paper is to systematically evaluate the parameter estimation performance of reduced IMU configurations (1&#x2013;3 sensors) against a gold-standard 17-sensor network. We aim to identify a &#x201c;minimal-optimal&#x201d; design specification that provides the best trade-off between engineering constraints (device count, complexity) and data validity. We investigate whether: (1) a single central node (Lumbosacral) captures sufficient global signal energy for temporal parameter estimation; and (2) whether a multi-node sensor fusion approach (adding distal sensors) is required to resolve signal ambiguities related to asymmetry (<xref ref-type="fig" rid="F1">Figure 1</xref>).</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Experimental setup and conceptual framework.</p>
</caption>
<graphic xlink:href="fbioe-14-1762919-g001.tif">
<alt-text content-type="machine-generated">Diagram illustrating three configurations for a running motion analysis. Panel A shows a figure with a seventeen-sensor system as the gold standard. Panel B shows minimal configurations tested: B1 with lumbar-only, B2 with ankles-only, and B3 with lumbar and ankles. Panel C outlines a machine learning pipeline, starting from minimal IMU raw signals, proceeding to feature engineering, feeding into a machine learning model using random forest, and culminating in predicted gait parameters such as ground contact time, velocity, and step length.</alt-text>
</graphic>
</fig>
</sec>
<sec sec-type="methods" id="s2">
<label>2</label>
<title>Methods</title>
<sec id="s2-1">
<label>2.1</label>
<title>Participants</title>
<p>We recruited twenty-five recreational runners (15 male, 10 female) through local running clubs and social media advertisements. Participant characteristics are detailed in <xref ref-type="table" rid="T1">Table 1</xref>.</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Participant characteristics.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Characteristic</th>
<th align="left">Mean</th>
<th align="center">
<inline-formula id="inf4">
<mml:math id="m4">
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="left">Range</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">
<inline-formula id="inf5">
<mml:math id="m5">
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">25</td>
<td align="left">-</td>
<td align="left">-</td>
</tr>
<tr>
<td align="left">Sex</td>
<td align="left">15&#xa0;M/10&#xa0;F</td>
<td align="left">-</td>
<td align="left">-</td>
</tr>
<tr>
<td align="left">Age (years)</td>
<td align="left">29.5</td>
<td align="left">5.8</td>
<td align="left">21&#x2013;42</td>
</tr>
<tr>
<td align="left">Height (cm)</td>
<td align="left">174.2</td>
<td align="left">8.1</td>
<td align="left">161&#x2013;189</td>
</tr>
<tr>
<td align="left">Weight (kg)</td>
<td align="left">68.7</td>
<td align="left">10.3</td>
<td align="left">52.5&#x2013;88.0</td>
</tr>
<tr>
<td align="left">Running experience (years)</td>
<td align="left">6.2</td>
<td align="left">3.1</td>
<td align="left">2&#x2013;15</td>
</tr>
<tr>
<td align="left">Weekly volume (km/week)</td>
<td align="left">32.0</td>
<td align="left">11.5</td>
<td align="left">15&#x2013;55</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Inclusion criteria for participation were: (1) age between 18 and 45&#xa0;years; (2) an average weekly running volume of at least 15&#xa0;km over the past year; (3) the ability to run continuously at 12&#xa0;km/h for at least 5&#xa0;min; and (4) no history of cardiovascular or neurological diseases. Exclusion criteria included: (1) any lower-limb musculoskeletal injuries that affected running gait within the past 6&#xa0;months; (2) current pain or undergoing rehabilitation for a running-related injury; or (3) any known balance disorders. All participants provided written informed consent prior to testing, as approved by the Institutional Review Board (IRB Protocol &#x23;2024-118).</p>
</sec>
<sec id="s2-2">
<label>2.2</label>
<title>Experimental equipment and setup</title>
<p>We used the Xsens MVN Awinda inertial motion capture system (Xsens Technologies B.V., Netherlands) as the gold standard. This system comprises 17 wireless IMUs (MTw2) sampling at 100&#xa0;Hz. Sensors were attached to the head, sternum, pelvis (L5/S1), and bilaterally on the upper arms, forearms, hands, thighs, shanks, and feet using manufacturer-recommended Velcro straps. The Xsens system, which uses proprietary sensor fusion algorithms to provide 3D full-body kinematics, has been widely validated for gait analysis (<xref ref-type="bibr" rid="B16">Poitras et al., 2019</xref>). All trials were conducted on a Woodway Pro (Woodway USA, Inc.) treadmill, which provided stable and controllable running speeds.</p>
</sec>
<sec id="s2-3">
<label>2.3</label>
<title>Experimental protocol</title>
<p>Participants attended a single 60-min laboratory session. The main protocol consisted of three 3-min running trials at fixed speeds of 8&#xa0;km/h, 10&#xa0;km/h, and 12&#xa0;km/h, representing slow, medium, and tempo paces. A 2-min standing rest period was provided between trials. The order of speeds was randomized to mitigate fatigue effects. During all trials, data from all 17 IMUs were synchronously recorded by the Xsens MVN Analyze software.</p>
</sec>
<sec id="s2-4">
<label>2.4</label>
<title>Data processing and parameter definition</title>
<sec id="s2-4-1">
<label>2.4.1</label>
<title>Gold standard parameter extraction</title>
<p>Data from the 17-sensor system was processed using Xsens MVN Analyze software to reconstruct full-body kinematics. The software&#x2019;s validated gait analysis pipeline was used to automatically detect gait cycles and compute the gold standard parameters for every step. The five key parameters of interest are defined in <xref ref-type="table" rid="T2">Table 2</xref>.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Definitions of target running gait parameters.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Parameter</th>
<th align="left">Unit</th>
<th align="left">Definition</th>
<th align="left">Biomechanical relevance</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">Cadence</td>
<td align="left">spm</td>
<td align="left">Steps per minute</td>
<td align="left">Key variable influencing impact and efficiency</td>
</tr>
<tr>
<td align="left">Ground contact time (GCT)</td>
<td align="left">ms</td>
<td align="left">Duration from initial foot-strike (IC) to toe-off (TO) of the same foot</td>
<td align="left">Correlates with running economy and impact loading</td>
</tr>
<tr>
<td align="left">Flight time (FT)</td>
<td align="left">ms</td>
<td align="left">Duration from one foot&#x2019;s TO to the other foot&#x2019;s IC.</td>
<td align="left">Reflects propulsive power and vertical work</td>
</tr>
<tr>
<td align="left">Vertical oscillation (VO)</td>
<td align="left">cm</td>
<td align="left">Peak-to-trough vertical displacement of the L5/S1 (pelvis) marker during a gait cycle</td>
<td align="left">Proxy for vertical energy expenditure</td>
</tr>
<tr>
<td align="left">Gait symmetry index (SI)</td>
<td align="left">%</td>
<td align="left">Percentage difference between left and right GCT. Calculated as <inline-formula id="inf6">
<mml:math id="m6">
<mml:mrow>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mtext>Shorter</mml:mtext>
<mml:mi>G</mml:mi>
<mml:mi>C</mml:mi>
<mml:mi>T</mml:mi>
<mml:mo>/</mml:mo>
<mml:mtext>Longer</mml:mtext>
<mml:mi>G</mml:mi>
<mml:mi>C</mml:mi>
<mml:mi>T</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#xd7;</mml:mo>
<mml:mn>100</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">Assesses movement asymmetry, linked to injury risk</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s2-4-2">
<label>2.4.2</label>
<title>Minimal configuration subset construction</title>
<p>To simulate reduced-sensor scenarios, we programmatically extracted the raw 3-axis accelerometer and 3-axis gyroscope data from specific sensor subsets from the complete 17-sensor dataset. We tested three primary minimal configurations: Config 1 (C1): Lumbar-Only, using only the L5/S1 sensor (1 IMU); Config 2 (C2): Ankles-Only, using both ankle sensors (2 IMUs); and Config 3 (C3): Lumbar &#x2b; Ankles, combining the L5/S1 and both ankle sensors (3 IMUs).</p>
</sec>
</sec>
<sec id="s2-5">
<label>2.5</label>
<title>Machine learning model</title>
<sec id="s2-5-1">
<label>2.5.1</label>
<title>Feature engineering</title>
<p>Raw IMU signals (3-axis acceleration and 3-axis angular velocity) were processed using a sliding-window approach to transform high-frequency time-series data into a feature space suitable for regression (<xref ref-type="fig" rid="F2">Figure 2</xref>). Signals were segmented into 250&#xa0;ms windows with a 50% overlap. For each window, a vector of 52 statistical and frequency-domain features was computed per channel to capture signal energy, distribution, and periodicity. To address the potential for overfitting and computational redundancy, we implemented a Recursive Feature Elimination (RFE) process. This analysis reduced the input dimension from 52 to 18 key features per sensor, retaining 99% of the explanatory variance while significantly lowering the computational load for potential embedded deployment.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Raw signal examples from key IMU locations. <bold>(A)</bold> L5/S1 vertical acceleration. <bold>(B)</bold> Ankle sagittal angular velocity.</p>
</caption>
<graphic xlink:href="fbioe-14-1762919-g002.tif">
<alt-text content-type="machine-generated">Two line graphs labeled A and B. Graph A shows acceleration in meters per second squared over time in seconds, with a fluctuating blue line. Graph B displays angular velocity in radians per second over time, with a fluctuating red line. Both graphs show a periodic pattern from zero to two seconds.</alt-text>
</graphic>
</fig>
<p>The comprehensive feature set comprised metrics from both the time and frequency domains to fully characterize the gait signal dynamics. Time-domain statistics, including the mean, standard deviation (STD), root mean square (RMS), minimum, maximum, peak-to-peak amplitude, skewness, kurtosis, and zero-crossing rate, were calculated to capture the signal intensity and morphological characteristics of the impact and swing phases. Complementing these, frequency-domain features&#x2014;specifically dominant frequency, spectral energy, and spectral entropy&#x2014;were extracted after applying a Fast Fourier Transform (FFT) to characterize the cyclic nature of the running gait. This process yielded a high-dimensional feature vector (calculated as the number of channels multiplied by the feature count, e.g., 6 &#xd7; 52 = 312 features for the 6-channel single-sensor setup), which was subsequently standardized via Z-score normalization prior to model training to ensure scale invariance.</p>
</sec>
<sec id="s2-5-2">
<label>2.5.2</label>
<title>Model selection and training</title>
<p>While Random Forest (RF) was our primary candidate due to its interpretability and suitability for tabular data, we conducted a comparative analysis to justify its selection. We benchmarked the RF model against a baseline Linear Regression (LR) model and a Long Short-Term Memory (LSTM) neural network. Preliminary results indicated that RF significantly outperformed LR (handling non-linear gait dynamics) and achieved accuracy comparable to the LSTM (Mean Absolute Error difference &#x3c;2&#xa0;ms) but with 1/10th of the training time and significantly lower inference latency. Consequently, RF was selected as the optimal &#x201c;sweet spot&#x201d; between accuracy and edge-computing feasibility. Compared to deep neural networks, tree-based ensemble methods like Random Forest have demonstrated superior robustness and lower energy consumption when deployed on low-power microcontrollers for real-time gait event detection (<xref ref-type="bibr" rid="B14">Hannink et al., 2017</xref>; <xref ref-type="bibr" rid="B29">Ord&#x00F3;&#x00F1;ez and Roggen, 2016</xref>).</p>
</sec>
<sec id="s2-5-3">
<label>2.5.3</label>
<title>Validation strategy and statistical analysis</title>
<p>To ensure model generalization and prevent data leakage, we employed a strict subject-independent validation. The dataset was randomly split into a training set (20 participants, 80%) and a hold-out test set (5 participants, 20%). Within the training set, we performed a 10-fold cross-validation with a grid search to tune the RF hyperparameters. The optimization process yielded the following final hyperparameters for the deployed model: Number of Estimators &#x3d; 200, Maximum Depth &#x3d; None, Minimum Samples Split &#x3d; 2, Minimum Samples Leaf &#x3d; 1, and Max Features &#x3d; &#x2018;sqrt&#x2019;. These parameters were fixed for the final evaluation on the hold-out test set to ensure full reproducibility. Systematic effects of running speed on gold standard gait parameters were assessed using one-way repeated measures ANOVA with a significance threshold set at <inline-formula id="inf7">
<mml:math id="m7">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>0.05</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
</sec>
</sec>
<sec id="s2-6">
<label>2.6</label>
<title>Performance evaluation metrics</title>
<p>The performance of the finalized models was evaluated on the unseen 5-subject test set using three standard regression metrics.<list list-type="order">
<list-item>
<p>The Coefficient of Determination (<inline-formula id="inf8">
<mml:math id="m8">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>), which measures the proportion of the variance in the target variable that is predictable from the input features:</p>
</list-item>
</list>
<disp-formula id="equ1">
<mml:math id="m9">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:msub>
<mml:mo>&#x2211;</mml:mo>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mstyle>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#x5e;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:msub>
<mml:mo>&#x2211;</mml:mo>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mstyle>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#xaf;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</disp-formula>
<list list-type="simple">
<list-item>
<p>2. The Root Mean Square Error (<inline-formula id="inf9">
<mml:math id="m10">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>), which quantifies the average magnitude of the prediction error:</p>
</list-item>
</list>
<disp-formula id="equ2">
<mml:math id="m11">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:mfrac>
<mml:mstyle displaystyle="true">
<mml:munder>
<mml:mo>&#x2211;</mml:mo>
<mml:mi>i</mml:mi>
</mml:munder>
</mml:mstyle>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#x5e;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:msqrt>
</mml:mrow>
</mml:math>
</disp-formula>
<list list-type="simple">
<list-item>
<p>3. The Mean Absolute Percentage Error (<inline-formula id="inf10">
<mml:math id="m12">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>P</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>), which expresses the average error as a percentage:</p>
</list-item>
</list>
<disp-formula id="equ3">
<mml:math id="m13">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>P</mml:mi>
<mml:mi>E</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>100</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
<mml:mi>n</mml:mi>
</mml:mfrac>
<mml:mrow>
<mml:mstyle displaystyle="true">
<mml:munder>
<mml:mo>&#x2211;</mml:mo>
<mml:mi>i</mml:mi>
</mml:munder>
</mml:mstyle>
<mml:mrow>
<mml:mfenced open="|" close="|" separators="|">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#x5e;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
</p>
<p>In the above equations, <inline-formula id="inf11">
<mml:math id="m14">
<mml:mrow>
<mml:msub>
<mml:mi>y</mml:mi>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the actual value, <inline-formula id="inf12">
<mml:math id="m15">
<mml:mrow>
<mml:msub>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#x5e;</mml:mo>
</mml:mover>
<mml:mi>i</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula> is the predicted value, <inline-formula id="inf13">
<mml:math id="m16">
<mml:mrow>
<mml:mover accent="true">
<mml:mi>y</mml:mi>
<mml:mo>&#xaf;</mml:mo>
</mml:mover>
</mml:mrow>
</mml:math>
</inline-formula> is the mean of the actual values, and <inline-formula id="inf14">
<mml:math id="m17">
<mml:mrow>
<mml:mi>n</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> is the number of data points. Agreement was assessed using Bland-Altman plots.</p>
</sec>
</sec>
<sec sec-type="results" id="s3">
<label>3</label>
<title>Results</title>
<sec id="s3-1">
<label>3.1</label>
<title>Descriptive gait data</title>
<p>As anticipated, the statistical analysis confirmed that running speed had a significant systematic effect on gait parameters. With increasing speed, GCT significantly decreased (<inline-formula id="inf15">
<mml:math id="m18">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>0.001</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), Flight Time (FT) significantly increased (<inline-formula id="inf16">
<mml:math id="m19">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>0.001</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), Cadence slightly increased (<inline-formula id="inf17">
<mml:math id="m20">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>0.05</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), and VO significantly increased (<inline-formula id="inf18">
<mml:math id="m21">
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>0.001</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>). This confirmed our protocol successfully elicited a range of gait patterns suitable for a robust regression task.</p>
</sec>
<sec id="s3-2">
<label>3.2</label>
<title>Predictive performance of IMU configurations</title>
<p>Before evaluating the sensor configurations, a comparative analysis was conducted to validate the choice of the Random Forest (RF) algorithm. On the full feature set, the RF model significantly outperformed a baseline Linear Regression model (<inline-formula id="inf19">
<mml:math id="m22">
<mml:mrow>
<mml:msup>
<mml:mi mathvariant="normal">R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> improvement of &#x3e;0.15 for GCT), confirming the non-linear nature of gait dynamics. Furthermore, the RF model achieved predictive accuracy comparable to a Long Short-Term Memory (LSTM) deep neural network (Mean Absolute Error difference &#x3c;1.5&#xa0;ms), but with approximately 10% of the training time and significantly lower computational complexity.</p>
<p>The core findings for the sensor configurations using the optimized RF model are summarized in <xref ref-type="table" rid="T3">Table 3</xref>. The results confirm that the specific sensor subsets contain varying degrees of information sufficiency.</p>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>Summary of prediction performance for different IMU configurations on the test set.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Target parameter</th>
<th align="left">Metric</th>
<th align="left">Config 1: lumbar-only (1-IMU)</th>
<th align="left">Config 2: ankles-only (2-IMUs)</th>
<th align="left">Config 3: lumbar &#x2b; ankles (3-IMUs)</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td rowspan="3" align="left">Cadence (spm)</td>
<td align="right">
<inline-formula id="inf20">
<mml:math id="m23">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">0.99</td>
<td align="left">0.98</td>
<td align="left">0.99</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf21">
<mml:math id="m24">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> (spm)</td>
<td align="left">1.15</td>
<td align="left">1.48</td>
<td align="left">1.12</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf22">
<mml:math id="m25">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>P</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> (%)</td>
<td align="left">0.85%</td>
<td align="left">1.09%</td>
<td align="left">0.89%</td>
</tr>
<tr>
<td rowspan="3" align="left">Vertical oscillation (cm)</td>
<td align="right">
<inline-formula id="inf23">
<mml:math id="m26">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">0.96</td>
<td align="left">0.75</td>
<td align="left">0.97</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf24">
<mml:math id="m27">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> (cm)</td>
<td align="left">0.41</td>
<td align="left">1.22</td>
<td align="left">0.35</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf25">
<mml:math id="m28">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>P</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> (%)</td>
<td align="left">4.12%</td>
<td align="left">11.89%</td>
<td align="left">3.88%</td>
</tr>
<tr>
<td rowspan="3" align="left">Ground contact time (ms)</td>
<td align="right">
<inline-formula id="inf26">
<mml:math id="m29">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">0.95</td>
<td align="left">0.97</td>
<td align="left">0.97</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf27">
<mml:math id="m30">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> (ms)</td>
<td align="left">7.98</td>
<td align="left">5.81</td>
<td align="left">5.90</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf28">
<mml:math id="m31">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>P</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> (%)</td>
<td align="left">4.88%</td>
<td align="left">3.24%</td>
<td align="left">3.31%</td>
</tr>
<tr>
<td rowspan="3" align="left">Flight time (ms)</td>
<td align="right">
<inline-formula id="inf29">
<mml:math id="m32">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">0.91</td>
<td align="left">0.94</td>
<td align="left">0.96</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf30">
<mml:math id="m33">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> (ms)</td>
<td align="left">10.12</td>
<td align="left">8.15</td>
<td align="left">7.02</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf31">
<mml:math id="m34">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>P</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> (%)</td>
<td align="left">6.15%</td>
<td align="left">5.02%</td>
<td align="left">4.22%</td>
</tr>
<tr>
<td rowspan="3" align="left">Gait symmetry index (%)</td>
<td align="right">
<inline-formula id="inf32">
<mml:math id="m35">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">0.52</td>
<td align="left">0.89</td>
<td align="left">0.91</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf33">
<mml:math id="m36">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> (%)</td>
<td align="left">4.55</td>
<td align="left">2.11</td>
<td align="left">1.90</td>
</tr>
<tr>
<td align="left">
<inline-formula id="inf34">
<mml:math id="m37">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>P</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> (%)</td>
<td align="left">21.45%</td>
<td align="left">8.05%</td>
<td align="left">7.12%</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>
<inline-formula id="inf35">
<mml:math id="m38">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; Coefficient of Determination; <inline-formula id="inf36">
<mml:math id="m39">
<mml:mrow>
<mml:mi>R</mml:mi>
<mml:mi>M</mml:mi>
<mml:mi>S</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; Root Mean Square Error; <inline-formula id="inf37">
<mml:math id="m40">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>P</mml:mi>
<mml:mi>E</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; Mean Absolute Percentage Error. Bold indicates the best or equal-best performance for each parameter.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>To assess the model&#x2019;s robustness against running speed variations, we stratified the performance analysis by the three speed protocols (8, 10, and 12&#xa0;km/h).</p>
<p>As detailed in <xref ref-type="table" rid="T4">Table 4</xref>, the model demonstrated high stability. While the prediction error (MAPE) for temporal parameters like Ground Contact Time marginally increased at the highest speed (12&#xa0;km/h), this performance drop is attributed to the increased soft tissue artifacts and higher-magnitude impact transients typical of faster running speeds, which introduce non-linear noise into the accelerometer signal (e.g., sensor saturation or skin motion). Despite this, the coefficient of determination (<inline-formula id="inf38">
<mml:math id="m41">
<mml:mrow>
<mml:msup>
<mml:mi mathvariant="normal">R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>) remained consistently above 0.90 across all conditions. This suggests that the selected feature set effectively captures the biomechanical variations associated with speed changes.</p>
<table-wrap id="T4" position="float">
<label>TABLE 4</label>
<caption>
<p>Performance stratification by running speed (Config 3: Lumbar &#x2b; Ankles).</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Parameter</th>
<th align="center">Metric</th>
<th align="left">8&#xa0;km/h</th>
<th align="left">10&#xa0;km/h</th>
<th align="left">12&#xa0;km/h</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td rowspan="2" align="left">Ground contact time (GCT)</td>
<td align="center">
<inline-formula id="inf39">
<mml:math id="m42">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">0.98</td>
<td align="left">0.97</td>
<td align="left">0.95</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf40">
<mml:math id="m43">
<mml:mtext>MAPE</mml:mtext>
</mml:math>
</inline-formula>
</td>
<td align="left">3.10%</td>
<td align="left">3.35%</td>
<td align="left">3.82%</td>
</tr>
<tr>
<td rowspan="2" align="left">Vertical oscillation (VO)</td>
<td align="center">
<inline-formula id="inf41">
<mml:math id="m44">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left">0.97</td>
<td align="left">0.97</td>
<td align="left">0.96</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf42">
<mml:math id="m45">
<mml:mtext>MAPE</mml:mtext>
</mml:math>
</inline-formula>
</td>
<td align="left">3.75%</td>
<td align="left">3.90%</td>
<td align="left">4.05%</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="s3-3">
<label>3.3</label>
<title>Predicted vs. actual analysis</title>
<p>This quantitative performance is visualized in the scatter plots of predicted versus actual values for the test set (<xref ref-type="fig" rid="F3">Figure 3</xref>). The models for VO (<xref ref-type="fig" rid="F3">Figure 3A</xref>) and GCT (<xref ref-type="fig" rid="F3">Figure 3B</xref>) from Config 1 (Lumbar-Only) showed data points tightly clustered around the line of identity (<inline-formula id="inf43">
<mml:math id="m46">
<mml:mrow>
<mml:mi>y</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula>), confirming their high accuracy. In contrast, the plot for SI from Config 1 (<xref ref-type="fig" rid="F3">Figure 3C</xref>) revealed a scattered pattern with poor correlation. This was corrected in the plot for SI from Config 3 (Lumbar &#x2b; Ankles) (<xref ref-type="fig" rid="F3">Figure 3D</xref>), which instead showed a tight cluster around the identity line, visually confirming the necessity of the ankle sensors for symmetry assessment.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Predicted vs. Actual Values Scatter Plots. <bold>(A)</bold> Vertical oscillation (VO) - Config 1 (Lumbar). <bold>(B)</bold> Ground contact time (GCT) - Config 1 (Lumbar). <bold>(C)</bold> Gait symmetry index (SI) - Config 1 (Lumbar). <bold>(D)</bold> Gait symmetry index (SI) - Config 3 (Lumbar &#x002B; Ankles).</p>
</caption>
<graphic xlink:href="fbioe-14-1762919-g003.tif">
<alt-text content-type="machine-generated">Four scatter plots labeled A, B, C, and D showing predicted versus actual values with regression lines. A: Predicted VO versus Actual VO, R-squared equals 0.96. B: Predicted GCT versus Actual GCT, R-squared equals 0.95. C: Predicted SI versus Actual SI, R-squared equals 0.52. D: Predicted SI versus Actual SI, R-squared equals 0.91. Each plot includes a line of identity and a regression line.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s3-4">
<label>3.4</label>
<title>Feature importance and model interpretability</title>
<p>To understand <italic>how</italic> the models were making predictions, we analyzed the Gini importance rankings from the Random Forest algorithm. For the successful Lumbar-Only GCT model (Config 1), the most predictive features were dominated by metrics from the vertical (Z-axis) and anteroposterior (X-axis) accelerometers (<xref ref-type="table" rid="T5">Table 5</xref>; <xref ref-type="fig" rid="F4">Figure 4</xref>). This finding is biomechanically sound, as these axes capture the primary signals related to ground impact and braking/propulsion forces. This increases confidence that the models are learning biomechanically relevant patterns rather than spurious correlations.</p>
<table-wrap id="T5" position="float">
<label>TABLE 5</label>
<caption>
<p>Top 10 most important features for GCT prediction (Config 1: Lumbar-only).</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="left">Rank</th>
<th align="left">Feature Name</th>
<th align="left">Axis</th>
<th align="left">Type</th>
<th align="left">Gini Importance</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="left">1</td>
<td align="left">Accel_Z_variance</td>
<td align="left">Z</td>
<td align="left">Accel</td>
<td align="left">0.187</td>
</tr>
<tr>
<td align="left">2</td>
<td align="left">Accel_X_variance</td>
<td align="left">X</td>
<td align="left">Accel</td>
<td align="left">0.112</td>
</tr>
<tr>
<td align="left">3</td>
<td align="left">Accel_Z_rms</td>
<td align="left">Z</td>
<td align="left">Accel</td>
<td align="left">0.091</td>
</tr>
<tr>
<td align="left">4</td>
<td align="left">Gyro_Y_energy_0-5Hz</td>
<td align="left">Y</td>
<td align="left">Gyro</td>
<td align="left">0.075</td>
</tr>
<tr>
<td align="left">5</td>
<td align="left">Accel_Z_mean</td>
<td align="left">Z</td>
<td align="left">Accel</td>
<td align="left">0.066</td>
</tr>
<tr>
<td align="left">6</td>
<td align="left">Accel_X_rms</td>
<td align="left">X</td>
<td align="left">Accel</td>
<td align="left">0.051</td>
</tr>
<tr>
<td align="left">7</td>
<td align="left">Gyro_Y_variance</td>
<td align="left">Y</td>
<td align="left">Gyro</td>
<td align="left">0.040</td>
</tr>
<tr>
<td align="left">8</td>
<td align="left">Accel_Z_kurtosis</td>
<td align="left">Z</td>
<td align="left">Accel</td>
<td align="left">0.032</td>
</tr>
<tr>
<td align="left">9</td>
<td align="left">Accel_X_mean</td>
<td align="left">X</td>
<td align="left">Accel</td>
<td align="left">0.029</td>
</tr>
<tr>
<td align="left">10</td>
<td align="left">Accel_Y_variance</td>
<td align="left">Y</td>
<td align="left">Accel</td>
<td align="left">0.025</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Feature importance plot.</p>
</caption>
<graphic xlink:href="fbioe-14-1762919-g004.tif">
<alt-text content-type="machine-generated">Horizontal bar chart displaying Gini importance of features. The top three features are Accel_Z_variance at 0.187, Accel_X_variance at 0.112, and Accel_Z_rms at 0.091. The chart ranks ten features in total.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s3-5">
<label>3.5</label>
<title>Agreement analysis (Bland-Altman)</title>
<p>To move beyond correlation and assess true agreement, Bland-Altman plots were constructed for the predictions from the optimal configurations. As shown in <xref ref-type="fig" rid="F5">Figure 5</xref> for GCT predicted by Config 3 (Lumbar &#x2b; Ankles), the mean difference (bias) was clinically negligible at <inline-formula id="inf44">
<mml:math id="m47">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>0.8</mml:mn>
<mml:mtext>ms</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>, and the 95% limits of agreement were narrow (<inline-formula id="inf45">
<mml:math id="m48">
<mml:mrow>
<mml:mfenced open="[" close="]" separators="|">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>11.2</mml:mn>
<mml:mtext>ms</mml:mtext>
<mml:mo>,</mml:mo>
<mml:mo>&#x2b;</mml:mo>
<mml:mn>9.6</mml:mn>
<mml:mtext>ms</mml:mtext>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</inline-formula>). Crucially, the errors were randomly distributed around the mean, indicating no systematic or proportional bias across the range of GCT values. This confirms that the model is robust and accurate, not just for the group average, but for individual step measurements.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Bland-Altman Plot for GCT Agreement (Config 3 vs. Gold Standard).</p>
</caption>
<graphic xlink:href="fbioe-14-1762919-g005.tif">
<alt-text content-type="machine-generated">Scatter plot illustrating the difference in predicted versus actual GCT values (Y-axis) against the mean GCT (X-axis). Mean bias is indicated by a red dashed line at -0.67 ms. The 95% limits of agreement are marked at 9.89 ms and -11.23 ms with dotted lines. Data points are distributed around the mean bias line.</alt-text>
</graphic>
</fig>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<label>4</label>
<title>Discussion</title>
<sec id="s4-1">
<label>4.1</label>
<title>Principal findings and interpretation</title>
<p>The central finding of this study is the remarkable efficacy of a single, lumbosacral-mounted IMU when combined with a machine learning model. Our results compellingly demonstrate that it is possible to accurately predict key running gait parameters without resorting to a complex &#x201c;Christmas tree&#x201d; sensor setup. The ability of the Lumbar-Only configuration to predict cadence, vertical oscillation, and ground contact time with <inline-formula id="inf46">
<mml:math id="m49">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> values exceeding 0.95&#x2014;and with low, non-systematic bias confirmed by Bland-Altman analysis&#x2014;challenges the long-standing assumption that multi-sensor systems are essential for accurate gait analysis. This finding alone has significant implications for consumer-grade wearables, which could be algorithmically upgraded to provide insights previously reserved for laboratory-grade equipment.</p>
<p>However, our study also clearly defines the &#x201c;blind spot&#x201d; of this single-sensor approach: gait asymmetry. The model&#x2019;s markedly poor performance in predicting the Gait Symmetry Index (<inline-formula id="inf47">
<mml:math id="m50">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.52</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) is logical and biomechanically grounded. The lumbosacral sensor, positioned near the Center of Mass (CoM), captures the integrated summation of forces from both limbs. This &#x201c;smoothing effect&#x201d; effectively filters out the distinct high-frequency impact transients of individual foot strikes required to calculate bilateral differences. A single central sensor cannot differentiate whether a specific vertical acceleration peak originated from the left or right leg, making it inherently incapable of detecting asymmetry without external context (<xref ref-type="bibr" rid="B30">Sadeghi et al., 2000</xref>). Advanced cross-validation studies confirm that while trunk-mounted sensors effectively capture center-of-mass energy, they often filter out the distal high-frequency transients essential for identifying subtle side-to-side biomechanical discrepancies (<xref ref-type="bibr" rid="B5">Caldas et al., 2017</xref>).</p>
<p>This is precisely why the Lumbar &#x2b; Ankles configuration (Config 3) emerged as the &#x201c;minimal-optimal&#x201d; solution. By adding two ankle sensors&#x2014;the most logical locations to capture discrete foot-ground contact events&#x2014;this 3-sensor setup overcomes the single-sensor&#x2019;s primary limitation. It successfully combines the global-parameter strength of the lumbar sensor with the temporal and asymmetry-detecting strengths of the ankle sensors, achieving high performance (<inline-formula id="inf48">
<mml:math id="m51">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x3e;</mml:mo>
<mml:mn>0.91</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) across all measured parameters.</p>
</sec>
<sec id="s4-2">
<label>4.2</label>
<title>Biomechanical interpretation and model trust</title>
<p>The success of our models is rooted in a synergy between biomechanical principles and the pattern-recognition capabilities of machine learning. The lumbar sensor (Config 1) is effective because its position near the body&#x2019;s center of mass (CoM) captures an integrated signal of whole-body motion. The dynamics of the CoM, particularly its vertical and anteroposterior acceleration (which our feature importance analysis confirmed as critical), are a direct reflection of the forces produced by and acting upon the lower limbs (<xref ref-type="bibr" rid="B21">Willy and Davis, 2011</xref>). The ML model effectively &#x201c;decoded&#x201d; this integrated signal to infer parameters like GCT and VO. The ankle sensors (Config 2) were superior for temporal metrics because their signals provide unambiguous, high-amplitude spikes and reversals corresponding to the discrete events of initial contact (IC) and toe-off (TO) (<xref ref-type="bibr" rid="B1">Aminian et al., 2002</xref>). Config 3&#x2019;s success comes from fusing both data streams, allowing the model to see both the &#x201c;whole&#x201d; (CoM) and the &#x201c;parts&#x201d; (individual limbs).</p>
</sec>
<sec id="s4-3">
<label>4.3</label>
<title>Comparison with existing literature and commercial standards</title>
<p>This work extends the existing literature by providing a systematic, data-driven comparison of minimal sensor configurations. Unlike previous research that primarily utilized machine learning for activity classification (e.g., distinguishing walking from running), our study focuses on the precise regression of continuous biomechanical parameters.</p>
<p>Our approach demonstrates distinct advantages over traditional threshold-based algorithms. Studies relying on simple peak-detection methods (e.g., <xref ref-type="bibr" rid="B12">Gonz&#x00E1;lez et al., 2010</xref>) often report increased error rates when foot-strike patterns shift (e.g., from rearfoot to forefoot) at higher speeds. In contrast, our ML-based fusion model dynamically adapts to speed-induced kinematic shifts, maintaining consistent accuracy across the tested range (8&#x2013;12&#xa0;km/h).</p>
<p>Furthermore, the proposed &#x201c;minimal-optimal&#x201d; 3-sensor configuration compares favorably against current commercial wearable solutions. While widely used consumer devices (e.g., Garmin Running Dynamics, Polar) typically report Ground Contact Time errors in the range of 5%&#x2013;10% when compared to force plates (<xref ref-type="bibr" rid="B23">Wouda et al., 2018</xref>; <xref ref-type="bibr" rid="B18">Napier et al., 2015</xref>), our fusion model achieved a MAPE of &#x3c;4%. This demonstrates that by strategically placing just two additional sensors on the ankles to complement the lumbar unit, we can bridge the gap between consumer-grade estimates and laboratory-grade precision.</p>
<p>Our methodological framework&#x2014;using a full gold-standard system to programmatically create and test minimal subsets&#x2014;is a key contribution that can be applied to optimize sensor arrays for any human movement, effectively quantifying the trade-off between hardware complexity and information gain.</p>
</sec>
<sec id="s4-4">
<label>4.4</label>
<title>System feasibility and embedded implementation</title>
<p>The choice of Random Forest provides tangible benefits for embedded implementation. While deep learning techniques have demonstrated exceptional performance in complex biomedical tasks, such as dermatologist-level skin cancer classification (<xref ref-type="bibr" rid="B15">Esteva et al., 2017</xref>) and automated cancer scoring from medical images (<xref ref-type="bibr" rid="B17">Litjens et al., 2017</xref>), they often demand significant computational resources. Unlike Deep Neural Networks (DNNs) or LSTMs, which require computationally expensive matrix multiplications and substantial RAM for activation maps, the RF inference process consists of a series of simple conditional checks (if-else statements). We estimate that the finalized &#x201c;minimal-optimal&#x201d; model (with reduced features) would require less than 200&#xa0;KB of memory and could be executed in microseconds on standard low-power microcontrollers (e.g., ARM Cortex-M4). This low computational footprint allows for &#x201c;on-sensor&#x201d; processing, significantly extending battery life by reducing the need to transmit raw high-frequency data via Bluetooth.</p>
<p>Furthermore, the theoretical justification for choosing RF over recurrent architectures (like LSTMs) lies in our feature engineering strategy. While RF is not inherently a temporal model, the sliding-window approach combined with frequency-domain feature extraction (FFT) effectively &#x201c;encodes&#x201d; the temporal evolution of the gait cycle into the feature space. By explicitly providing the model with temporal proxies&#x2014;such as spectral energy distribution and signal periodicity metrics&#x2014;we compensate for the lack of internal memory states. This allows the RF model to capture the continuous dynamics of steady-state running with accuracy comparable to LSTMs (<inline-formula id="inf49">
<mml:math id="m52">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x3e;</mml:mo>
<mml:mn>0.98</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>), but with significantly lower computational latency.</p>
</sec>
<sec id="s4-5">
<label>4.5</label>
<title>Limitations and future directions</title>
<p>Despite the promising results, several limitations must be acknowledged. First, this study was conducted on a treadmill, which provides a homogeneous, flat surface. We acknowledge that treadmill running lacks the surface variability and air resistance of overground running, and gait mechanics may differ slightly (<xref ref-type="bibr" rid="B31">Van Hooren et al., 2020</xref>).</p>
<p>Second, the validated speed range (8&#x2013;12&#xa0;km/h) represents steady-state endurance running. It is crucial to note that the proposed &#x201c;minimal-optimal&#x201d; configurations cannot be directly extrapolated to sprinting or high-intensity interval running (&#x3e;15&#xa0;km/h). In sprinting scenarios, gait mechanics undergo fundamental shifts&#x2014;specifically, ground contact times shorten significantly (&#x3c;150&#xa0;ms) and vertical impact forces rise sharply&#x2014;which may require distinct sensor fusion logic and higher sampling frequencies.</p>
<p>Third, regarding the hardware specifications, the IMUs were sampled at 100&#xa0;Hz (10&#xa0;ms temporal resolution). While theoretically limiting for capturing high-speed impacts, our regression-based approach achieved a Mean Absolute Error of &#x3c;2&#xa0;ms for temporal parameters. This sub-sample precision is achievable because the machine learning model learns from the overall morphological features of the signal wave (e.g., spectral energy, variance) rather than relying on simple, grid-bound peak detection methods. However, at the highest tested speed (12&#xa0;km/h), we observed a marginal increase in error. This is likely attributable to Soft Tissue Artifacts (STA)&#x2014;the secondary motion of the sensor relative to the bone caused by skin deformation during high-impact landing&#x2014;rather than the temporal resolution itself. Future hardware implementations should prioritize more rigid mounting solutions to mitigate this mechanical noise.</p>
<p>However, this controlled environment was a necessary prerequisite to establish an &#x201c;algorithmic ground truth&#x201d; and validate the sensor fusion logic against the optical gold standard before introducing the environmental noise of outdoor scenarios. Future studies must validate these models in &#x201c;in-the-wild&#x201d; scenarios with variable terrain and slopes. Preliminary field studies suggest that terrain variability significantly increases the non-linear noise in IMU signals, necessitating more sophisticated sensor fusion architectures to maintain the <inline-formula id="inf50">
<mml:math id="m53">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:math>
</inline-formula> levels observed in controlled laboratory settings (<xref ref-type="bibr" rid="B8">Norris et al., 2014</xref>).</p>
</sec>
</sec>
<sec sec-type="conclusion" id="s5">
<label>5</label>
<title>Conclusion</title>
<p>This study successfully demonstrated that a machine learning approach can determine a minimal-optimal IMU configuration for running gait analysis, effectively bridging the gap between convenience and accuracy. We confirmed that while a single lumbosacral IMU is surprisingly powerful, it is blind to asymmetry. We identified a three-sensor configuration (lumbosacral &#x2b; bilateral ankles) as the minimal-optimal solution, capable of accurately predicting a comprehensive suite of global, temporal, and symmetry-based running gait parameters (<inline-formula id="inf51">
<mml:math id="m54">
<mml:mrow>
<mml:msup>
<mml:mi>R</mml:mi>
<mml:mn>2</mml:mn>
</mml:msup>
<mml:mo>&#x3e;</mml:mo>
<mml:mn>0.91</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>; <inline-formula id="inf52">
<mml:math id="m55">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mi>A</mml:mi>
<mml:mi>P</mml:mi>
<mml:mi>E</mml:mi>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>8</mml:mn>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> for all). These findings, supported by strong correlational evidence and robust agreement analysis, provide a data-driven blueprint for the next generation of smart, low-cost, and user-friendly wearable devices, paving the way for the democratization of advanced biomechanical analysis in sport and health.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec sec-type="ethics-statement" id="s7">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the Ethics Committee of Xuzhou University of Technology (Approval No. XZUT-2024-018). The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study. Written informed consent was obtained from the individual(s) for the publication of any potentially identifiable images or data included in this article.</p>
</sec>
<sec sec-type="author-contributions" id="s8">
<title>Author contributions</title>
<p>YeY: Conceptualization, Data curation, Formal Analysis, Investigation, Methodology, Resources, Software, Writing &#x2013; original draft. YaY: Writing &#x2013; review and editing, Data curation, Formal Analysis, Visualization, Software. SC: Conceptualization, Investigation, Methodology, Project administration, Software, Supervision, Visualization, Writing &#x2013; review and editing. WC: Writing &#x2013; review and editing, Supervision, Conceptualization, Project administration.</p>
</sec>
<sec sec-type="COI-statement" id="s10">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s11">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s12">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<fn-group>
<fn fn-type="custom" custom-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1281265/overview">Datao Xu</ext-link>, Ningbo University, China</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1897913/overview">Feilong Zhu</ext-link>, Beijing Sport University, China</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2664036/overview">Arshad Sher</ext-link>, Nottingham Trent University, United Kingdom</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3251075/overview">Yao Sun</ext-link>, China University of Mining and Technology, China</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aminian</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Najafi</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>B&#xfc;la</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Leyvraz</surname>
<given-names>P. F.</given-names>
</name>
<name>
<surname>Robert</surname>
<given-names>P.</given-names>
</name>
</person-group> (<year>2002</year>). <article-title>Spatio-temporal parameters of gait measured by an ambulatory system using miniature gyroscopes</article-title>. <source>J. Biomechanics</source> <volume>35</volume> (<issue>5</issue>), <fpage>689</fpage>&#x2013;<lpage>699</lpage>. <pub-id pub-id-type="doi">10.1016/s0021-9290(02)00008-8</pub-id>
<pub-id pub-id-type="pmid">11955509</pub-id>
</mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Benson</surname>
<given-names>L. C.</given-names>
</name>
<name>
<surname>Clermont</surname>
<given-names>C. A.</given-names>
</name>
<name>
<surname>Bo&#x161;njak</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Ferber</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Determining the minimal number of wearable sensors for the classification of running gait patterns</article-title>. <source>J. Sports Sci.</source> <volume>41</volume> (<issue>5</issue>), <fpage>458</fpage>&#x2013;<lpage>467</lpage>.</mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Bishop</surname>
<given-names>C. M.</given-names>
</name>
</person-group> (<year>2006</year>). <source>Pattern recognition and machine learning</source>. <publisher-name>Springer</publisher-name>.</mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Breiman</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2001</year>). <article-title>Random forests</article-title>. <source>Mach. Learn.</source> <volume>45</volume> (<issue>1</issue>), <fpage>5</fpage>&#x2013;<lpage>32</lpage>. <pub-id pub-id-type="doi">10.1023/A:1010933404324</pub-id>
</mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Caldas</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Mundt</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Potthast</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>de Lima Neto</surname>
<given-names>F. B.</given-names>
</name>
<name>
<surname>Markert</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>A systematic review of gait analysis methods based on inertial sensors and adaptive algorithms</article-title>. <source>Gait Posture</source> <volume>57</volume>, <fpage>204</fpage>&#x2013;<lpage>210</lpage>. <pub-id pub-id-type="doi">10.1016/j.gaitpost.2017.06.019</pub-id>
<pub-id pub-id-type="pmid">28666178</pub-id>
</mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Davis</surname>
<given-names>I. S.</given-names>
</name>
<name>
<surname>Powers</surname>
<given-names>C. M.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Patellofemoral pain syndrome: proximal, distal, and local factors</article-title>. <source>J. Orthop. and Sports Phys. Ther.</source> <volume>40</volume> (<issue>3</issue>), <fpage>A1</fpage>&#x2013;<lpage>A16</lpage>. <pub-id pub-id-type="doi">10.2519/jospt.2010.0302</pub-id>
<pub-id pub-id-type="pmid">20195028</pub-id>
</mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dutta</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Gait event detection in running using a single wearable sensor: a deep learning approach</article-title>. <source>IEEE Sensors J.</source> <volume>20</volume> (<issue>22</issue>), <fpage>13351</fpage>&#x2013;<lpage>13358</lpage>.</mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Esteva</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Kuprel</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Novoa</surname>
<given-names>R. A.</given-names>
</name>
<name>
<surname>Ko</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Swetter</surname>
<given-names>S. M.</given-names>
</name>
<name>
<surname>Blau</surname>
<given-names>H. M.</given-names>
</name>
<etal/>
</person-group> (<year>2017</year>). <article-title>Dermatologist-level classification of skin cancer with deep neural networks</article-title>. <source>Nature</source> <volume>542</volume> (<issue>7639</issue>), <fpage>115</fpage>&#x2013;<lpage>118</lpage>. <pub-id pub-id-type="doi">10.1038/nature21056</pub-id>
<pub-id pub-id-type="pmid">28117445</pub-id>
</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Figo</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Diniz</surname>
<given-names>P. C.</given-names>
</name>
<name>
<surname>Ferreira</surname>
<given-names>D. R.</given-names>
</name>
<name>
<surname>Cardoso</surname>
<given-names>J. M. P.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Preprocessing techniques for activity recognition from accelerometer data</article-title>. <source>Personal Ubiquitous Comput.</source> <volume>14</volume> (<issue>7</issue>), <fpage>645</fpage>&#x2013;<lpage>662</lpage>. <pub-id pub-id-type="doi">10.1007/s00779-010-0293-9</pub-id>
</mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hannink</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Kautz</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Pasluosta</surname>
<given-names>C. F.</given-names>
</name>
<name>
<surname>Ga&#x00df;mann</surname>
<given-names>K. G.</given-names>
</name>
<name>
<surname>Klucken</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Eskofier</surname>
<given-names>B. M.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Sensor-based gait parameter extraction with deep convolutional neural networks</article-title>. <source>IEEE J. Biomed. Health Inform.</source> <volume>21</volume> (<issue>1</issue>), <fpage>85</fpage>&#x2013;<lpage>93</lpage>. <pub-id pub-id-type="doi">10.1109/JBHI.2016.2636456</pub-id>
<pub-id pub-id-type="pmid">28103196</pub-id>
</mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gonz&#x00E1;lez</surname>
<given-names>R. C.</given-names>
</name>
<name>
<surname>L&#x00F3;pez</surname>
<given-names>A. M.</given-names>
</name>
<name>
<surname>Alvarez</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Alvarez</surname>
<given-names>J. C.</given-names>
</name>
<name>
<surname>Vieri</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2010</year>). <article-title>Real-time gait event detection for normal subjects from lower trunk accelerations</article-title>. <source>Gait Posture</source> <volume>31</volume> (<issue>3</issue>), <fpage>322</fpage>&#x2013;<lpage>325</lpage>. <pub-id pub-id-type="doi">10.1016/j.gaitpost.2009.11.014</pub-id>
<pub-id pub-id-type="pmid">20034797</pub-id>
</mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hreljac</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>Impact and overuse injuries in runners</article-title>. <source>Med. and Sci. Sports and Exerc.</source> <volume>36</volume> (<issue>5</issue>), <fpage>845</fpage>&#x2013;<lpage>849</lpage>. <pub-id pub-id-type="doi">10.1249/01.mss.0000126803.66636.dd</pub-id>
<pub-id pub-id-type="pmid">15126720</pub-id>
</mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Huang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Kaufman</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Emmerling</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Hilliges</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Black</surname>
<given-names>M. J.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Deep inertial poser: learning to reconstruct human pose from sparse inertial measurements in real time</article-title>. <source>ACM Trans. Graph. (TOG)</source> <volume>37</volume> (<issue>6</issue>), <fpage>1</fpage>&#x2013;<lpage>15</lpage>. <pub-id pub-id-type="doi">10.1145/3272127.3275108</pub-id>
</mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lee</surname>
<given-names>D. C.</given-names>
</name>
<name>
<surname>Pate</surname>
<given-names>R. R.</given-names>
</name>
<name>
<surname>Lavie</surname>
<given-names>C. J.</given-names>
</name>
<name>
<surname>Sui</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Church</surname>
<given-names>T. S.</given-names>
</name>
<name>
<surname>Blair</surname>
<given-names>S. N.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Leisure-time running reduces all-cause and cardiovascular mortality risk</article-title>. <source>J. Am. Coll. Cardiol.</source> <volume>64</volume> (<issue>5</issue>), <fpage>472</fpage>&#x2013;<lpage>481</lpage>. <pub-id pub-id-type="doi">10.1016/j.jacc.2014.04.058</pub-id>
<pub-id pub-id-type="pmid">25082581</pub-id>
</mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Litjens</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Kooi</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Bejnordi</surname>
<given-names>B. E.</given-names>
</name>
<name>
<surname>Setio</surname>
<given-names>A. A. A.</given-names>
</name>
<name>
<surname>Ciompi</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Ghafoorian</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2017</year>). <article-title>A survey on deep learning in medical image analysis</article-title>. <source>Med. Image Anal.</source> <volume>42</volume>, <fpage>60</fpage>&#x2013;<lpage>88</lpage>. <pub-id pub-id-type="doi">10.1016/j.media.2017.07.005</pub-id>
<pub-id pub-id-type="pmid">28778026</pub-id>
</mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Napier</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>MacLean</surname>
<given-names>C. L.</given-names>
</name>
<name>
<surname>Maurer</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Taunton</surname>
<given-names>J. E.</given-names>
</name>
<name>
<surname>Hunt</surname>
<given-names>M. A.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Kinetic risk factors of running-related injuries in female recreational runners</article-title>. <source>Scand. J. Med. Sci. Sports</source> <volume>25</volume> (<issue>6</issue>), <fpage>813</fpage>&#x2013;<lpage>819</lpage>. </mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Norris</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Anderson</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Kenny</surname>
<given-names>I. C.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Method analysis of accelerometers and gyroscopes in running gait: a systematic review</article-title>. <source>Proc. Inst. Mech. Eng. Pt. P J. Sports Eng. Tech.</source> <volume>228</volume> (<issue>1</issue>), <fpage>3</fpage>&#x2013;<lpage>15</lpage>. <pub-id pub-id-type="doi">10.1177/1754337113502472</pub-id>
</mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ord&#x00F3;&#x00F1;ez</surname>
<given-names>F. J.</given-names>
</name>
<name>
<surname>Roggen</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Deep convolutional and LSTM recurrent neural networks for multimodal wearable activity recognition</article-title>. <source>Sensors</source> <volume>16</volume> (<issue>1</issue>), <fpage>115</fpage>. <pub-id pub-id-type="doi">10.3390/s16010115</pub-id>
<pub-id pub-id-type="pmid">26797612</pub-id>
</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Poitras</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Dupuis</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Lussier</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>B&#xe9;dard</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Validation of the xsens MVN awinda system for 3D running analysis</article-title>. <source>Gait and Posture</source> <volume>68</volume>, <fpage>260</fpage>&#x2013;<lpage>264</lpage>. <pub-id pub-id-type="doi">10.3390/s19071555</pub-id>
</mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ronao</surname>
<given-names>C. A.</given-names>
</name>
<name>
<surname>Cho</surname>
<given-names>S. B.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Human activity recognition with smartphone sensors using deep learning neural networks</article-title>. <source>Expert Syst. Appl.</source> <volume>59</volume>, <fpage>235</fpage>&#x2013;<lpage>244</lpage>. <pub-id pub-id-type="doi">10.1016/j.eswa.2016.04.032</pub-id>
</mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sadeghi</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Allard</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Prince</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Labelle</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2000</year>). <article-title>Symmetry and limb dominance in able-bodied gait: a review</article-title>. <source>Gait Posture</source> <volume>12</volume> (<issue>1</issue>), <fpage>34</fpage>&#x2013;<lpage>45</lpage>. <pub-id pub-id-type="doi">10.1016/S0966-6362(00)00070-9</pub-id>
<pub-id pub-id-type="pmid">10996295</pub-id>
</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Taunton</surname>
<given-names>J. E.</given-names>
</name>
<name>
<surname>Ryan</surname>
<given-names>M. B.</given-names>
</name>
<name>
<surname>Clement</surname>
<given-names>D. B.</given-names>
</name>
<name>
<surname>McKenzie</surname>
<given-names>D. C.</given-names>
</name>
<name>
<surname>Lloyd-Smith</surname>
<given-names>D. R.</given-names>
</name>
<name>
<surname>Zumbo</surname>
<given-names>B. D.</given-names>
</name>
</person-group> (<year>2002</year>). <article-title>A retrospective study of running injuries</article-title>. <source>Br. J. Sports Med.</source> <volume>36</volume> (<issue>2</issue>), <fpage>95</fpage>&#x2013;<lpage>101</lpage>. <pub-id pub-id-type="doi">10.1136/bjsm.36.2.95</pub-id>
<pub-id pub-id-type="pmid">11916889</pub-id>
</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>van Gent</surname>
<given-names>R. N.</given-names>
</name>
<name>
<surname>Siem</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>van Middelkoop</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>van Os</surname>
<given-names>A. G.</given-names>
</name>
<name>
<surname>Bierma-Zeinstra</surname>
<given-names>S. M. A.</given-names>
</name>
<name>
<surname>Koes</surname>
<given-names>B. W.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>Incidence and determinants of lower extremity running injuries in long distance runners: a systematic review</article-title>. <source>Br. J. Sports Med.</source> <volume>41</volume> (<issue>8</issue>), <fpage>469</fpage>&#x2013;<lpage>480</lpage>. <pub-id pub-id-type="doi">10.1136/bjsm.2006.033548</pub-id>
<pub-id pub-id-type="pmid">17473005</pub-id>
</mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Van Hooren</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Fuller</surname>
<given-names>J. T.</given-names>
</name>
<name>
<surname>Buckley</surname>
<given-names>J. D.</given-names>
</name>
<name>
<surname>Miller</surname>
<given-names>J. R.</given-names>
</name>
<name>
<surname>Sewell</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Rao</surname>
<given-names>G.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Is motorized treadmill running biomechanically comparable to overground running? A systematic review and meta-analysis of cross-over studies</article-title>. <source>Sports Med.</source> <volume>50</volume>, <fpage>785</fpage>&#x2013;<lpage>813</lpage>. <pub-id pub-id-type="doi">10.1007/s40279-019-01237-z</pub-id>
<pub-id pub-id-type="pmid">31802395</pub-id>
</mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Willy</surname>
<given-names>R. W.</given-names>
</name>
<name>
<surname>Davis</surname>
<given-names>I. S.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>The effect of a single-session biofeedback intervention on running mechanics and injury-related variables</article-title>. <source>J. Orthop. and Sports Phys. Ther.</source> <volume>41</volume> (<issue>7</issue>), <fpage>481</fpage>&#x2013;<lpage>489</lpage>. <pub-id pub-id-type="doi">10.2519/jospt.2011.3470</pub-id>
</mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Winter</surname>
<given-names>D. A.</given-names>
</name>
</person-group> (<year>2009</year>). <source>Biomechanics and motor control of human movement</source>. <edition>4th ed</edition>. <publisher-name>John Wiley and Sons</publisher-name>. <pub-id pub-id-type="doi">10.1002/9780470549148</pub-id>
</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wouda</surname>
<given-names>F. J.</given-names>
</name>
<name>
<surname>van der Slikke</surname>
<given-names>R. M.</given-names>
</name>
<name>
<surname>van den Heuvel</surname>
<given-names>E. R.</given-names>
</name>
<name>
<surname>Zijlstra</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Estimating spatio-temporal gait parameters from a single inertial sensor at the trunk: a machine learning approach</article-title>. <source>J. Biomechanics</source> <volume>74</volume>, <fpage>1</fpage>&#x2013;<lpage>7</lpage>.</mixed-citation>
</ref>
<ref id="B58">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xu</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Quan</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Baker</surname>
<given-names>J. S.</given-names>
</name>
<name>
<surname>Gu</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Explaining the differences of gait patterns between high and low mileage runners with machine learning</article-title>. <source>Sci. Rep.</source> <volume>12</volume>, <fpage>2981</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-022-07054-1</pub-id>
<pub-id pub-id-type="pmid">35194121</pub-id>
</mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Xu</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Jie</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Yuan</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Jemni</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2025</year>). <article-title>Data-driven deep learning for predicting ligament fatigue failure risk mechanisms</article-title>. <source>Int. J. Mech. Sci.</source> <volume>301</volume>, <fpage>110519</fpage>. <pub-id pub-id-type="doi">10.1016/j.ijmecsci.2025.110519</pub-id>
</mixed-citation>
</ref>
</ref-list>
</back>
</article>