<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Bioeng. Biotechnol.</journal-id>
<journal-title>Frontiers in Bioengineering and Biotechnology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Bioeng. Biotechnol.</abbrev-journal-title>
<issn pub-type="epub">2296-4185</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1350135</article-id>
<article-id pub-id-type="doi">10.3389/fbioe.2024.1350135</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Bioengineering and Biotechnology</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Enhancing biomechanical machine learning with limited data: generating realistic synthetic posture data using generative artificial intelligence</article-title>
<alt-title alt-title-type="left-running-head">Dindorf et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fbioe.2024.1350135">10.3389/fbioe.2024.1350135</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Dindorf</surname>
<given-names>Carlo</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2596437/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Dully</surname>
<given-names>Jonas</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2353425/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Konradi</surname>
<given-names>J&#xfc;rgen</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/176632/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Wolf</surname>
<given-names>Claudia</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Becker</surname>
<given-names>Stephan</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2352538/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Simon</surname>
<given-names>Steven</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Huthwelker</surname>
<given-names>Janine</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Werthmann</surname>
<given-names>Frederike</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kniepert</surname>
<given-names>Johanna</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Drees</surname>
<given-names>Philipp</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Betz</surname>
<given-names>Ulrich</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>&#x2020;</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name>
<surname>Fr&#xf6;hlich</surname>
<given-names>Michael</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/507160/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Department of Sports Science</institution>, <institution>University of Kaiserslautern-Landau</institution>, <addr-line>Kaiserslautern</addr-line>, <country>Germany</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Institute of Physical Therapy</institution>, <institution>Prevention and Rehabilitation</institution>, <institution>University Medical Centre</institution>, <institution>Johannes Gutenberg University Mainz</institution>, <addr-line>Mainz</addr-line>, <country>Germany</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>Department of Orthopedics and Trauma Surgery</institution>, <institution>University Medical Centre</institution>, <institution>Johannes Gutenberg University Mainz</institution>, <addr-line>Mainz</addr-line>, <country>Germany</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/824868/overview">Zhen (Jeff) Luo</ext-link>, University of Technology Sydney, Australia</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2095893/overview">Tianzhe Bao</ext-link>, University of Health and Rehabilitation Sciences, China</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1679741/overview">Chang Won Jeong</ext-link>, Wonkwang University, Republic of Korea</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Carlo Dindorf, <email>carlo.dindorf@rptu.de</email>
</corresp>
<fn fn-type="equal" id="fn001">
<label>
<sup>&#x2020;</sup>
</label>
<p>These authors have contributed equally to this work and share senior authorship</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>14</day>
<month>02</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>12</volume>
<elocation-id>1350135</elocation-id>
<history>
<date date-type="received">
<day>05</day>
<month>12</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>22</day>
<month>01</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2024 Dindorf, Dully, Konradi, Wolf, Becker, Simon, Huthwelker, Werthmann, Kniepert, Drees, Betz and Fr&#xf6;hlich.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Dindorf, Dully, Konradi, Wolf, Becker, Simon, Huthwelker, Werthmann, Kniepert, Drees, Betz and Fr&#xf6;hlich</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>
<bold>Objective:</bold> Biomechanical Machine Learning (ML) models, particularly deep-learning models, demonstrate the best performance when trained using extensive datasets. However, biomechanical data are frequently limited due to diverse challenges. Effective methods for augmenting data in developing ML models, specifically in the human posture domain, are scarce. Therefore, this study explored the feasibility of leveraging generative artificial intelligence (AI) to produce realistic synthetic posture data by utilizing three-dimensional posture data.</p>
<p>
<bold>Methods:</bold> Data were collected from 338 subjects through surface topography. A Variational Autoencoder (VAE) architecture was employed to generate and evaluate synthetic posture data, examining its distinguishability from real data by domain experts, ML classifiers, and Statistical Parametric Mapping (SPM). The benefits of incorporating augmented posture data into the learning process were exemplified by a deep autoencoder (AE) for automated feature representation.</p>
<p>
<bold>Results:</bold> Our findings highlight the challenge of differentiating synthetic data from real data for both experts and ML classifiers, underscoring the quality of synthetic data. This observation was also confirmed by SPM. By integrating synthetic data into AE training, the reconstruction error can be reduced compared to using only real data samples. Moreover, this study demonstrates the potential for reduced latent dimensions, while maintaining a reconstruction accuracy comparable to AEs trained exclusively on real data samples.</p>
<p>
<bold>Conclusion:</bold> This study emphasizes the prospects of harnessing generative AI to enhance ML tasks in the biomechanics domain.</p>
</abstract>
<kwd-group>
<kwd>machine learning</kwd>
<kwd>deep learning</kwd>
<kwd>spine</kwd>
<kwd>variational autoencoder</kwd>
<kwd>data augmentation</kwd>
<kwd>statistical parametric mapping</kwd>
</kwd-group>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Biomechanics</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<title>1 Introduction</title>
<p>Biomechanics, the study of human movement and its mechanical principles, holds great promise for advancing our understanding of human locomotion, aiding clinical diagnoses, and enhancing athletic performance (<xref ref-type="bibr" rid="B4">Barnes and Kilding, 2015</xref>; <xref ref-type="bibr" rid="B17">Ferreira et al., 2016</xref>; <xref ref-type="bibr" rid="B8">Ceyssens et al., 2019</xref>; <xref ref-type="bibr" rid="B55">Valamatos et al., 2022</xref>). In biomechanical data analysis, Artificial Intelligence (AI) and Machine Learning (ML) methods have gained traction (<xref ref-type="bibr" rid="B18">Halilaj et al., 2018</xref>; <xref ref-type="bibr" rid="B47">Phinyomark et al., 2018</xref>; <xref ref-type="bibr" rid="B11">Dindorf et al., 2022a</xref>), yielding promising results, such as in studies involving post-stroke patients (<xref ref-type="bibr" rid="B33">Lau et al., 2009</xref>) or Parkinson&#x2019;s disease (<xref ref-type="bibr" rid="B56">Wahid et al., 2015</xref>). These approaches excel in handling intricate, multidimensional data, offering objective insights, and pinpointing distinctive group-specific disparities (<xref ref-type="bibr" rid="B20">Horst et al., 2019</xref>; <xref ref-type="bibr" rid="B15">Dindorf et al., 2021a</xref>). Notably, these methods often outperform traditional statistical analysis methods in related databases (<xref ref-type="bibr" rid="B7">Bzdok et al., 2018</xref>; <xref ref-type="bibr" rid="B18">Halilaj et al., 2018</xref>; <xref ref-type="bibr" rid="B47">Phinyomark et al., 2018</xref>). However, their potential is frequently constrained by persistent challenges such as data scarcity.</p>
<p>Data scarcity refers to a situation in which the available data for analysis or decision-making are limited in quantity, quality, or relevance, often presenting challenges in drawing meaningful insights or conclusions (<xref ref-type="bibr" rid="B2">Alzubaidi et al., 2023</xref>). Unlike certain fields, such as image classification, which benefit from vast databases containing millions of images (<xref ref-type="bibr" rid="B10">Deng et al., 2009</xref>), biomechanical data frequently encounter limitations, typically comprising only hundreds or a few thousand data points (<xref ref-type="bibr" rid="B21">Horst et al., 2021</xref>). These limitations stem from various challenges, including difficulties in participant recruitment, resource constraints, ethical considerations, specialized expertise requirements, and the often expensive and intricate nature of the measurements. Consequently, the development and effectiveness of ML algorithms tailored to biomechanical tasks are impeded by the lack of comprehensive datasets.</p>
<p>Data augmentation is a widely used technique in ML and data science, aimed at artificially expanding the size of a dataset by applying various transformations or modifications to existing data (<xref ref-type="bibr" rid="B6">Bicer et al., 2022</xref>). The primary objective of data augmentation is to diversify the training dataset, making it more robust, and reducing overfitting (<xref ref-type="bibr" rid="B32">Lashgari et al., 2020</xref>). By introducing variations in the data, the model becomes better at generalizing to unseen examples, consequently enhancing its performance on real-world data. The utilization of data augmentation in ML improves a model&#x2019;s capacity for generalization, which is particularly pronounced in deep learning scenarios (<xref ref-type="bibr" rid="B6">Bicer et al., 2022</xref>). For example, in computer vision tasks, data augmentation may encompass randomly rotating or flipping images, changing their color balance, or cropping them differently (Jiang et al., 2020). Similarly, natural language processing techniques can involve paraphrasing sentences, adding synonyms, or introducing typographical errors into the text data (<xref ref-type="bibr" rid="B27">Kang et al., 2021</xref>; <xref ref-type="bibr" rid="B5">Bayer et al., 2023</xref>).</p>
<p>However, in biomechanics, kinematic data are often presented as tabular or time-series data for dynamic measurements (<xref ref-type="bibr" rid="B21">Horst et al., 2021</xref>). In the domain of clinical gait analysis, certain techniques such as magnitude perturbation, temporal perturbation, random rotation, and noise injection have been employed (<xref ref-type="bibr" rid="B28">Kiprijanovska et al., 2020</xref>; <xref ref-type="bibr" rid="B54">Tunca et al., 2020</xref>; <xref ref-type="bibr" rid="B45">Paragliola and Coronato, 2021</xref>). Alternatively, data augmentation for tabular data may involve generating additional samples by interpolating between existing data points or by applying sampling techniques primarily used for imbalanced datasets (for example, the synthetic minority oversampling technique: SMOTE) (<xref ref-type="bibr" rid="B15">Dindorf et al., 2021a</xref>; <xref ref-type="bibr" rid="B26">Iglesias et al., 2023</xref>).</p>
<p>Furthermore, there exists considerable promise in leveraging generative models for data generation purposes. Generative models such as Variational Autoencoders (VAEs), Generative Adversarial Networks (GANs), and autoregressive models like transformer-based models represent powerful ML models capable of creating new data samples that closely resemble the training data to which they were exposed (<xref ref-type="bibr" rid="B6">Bicer et al., 2022</xref>). These models learn the underlying data distributions and generate data points with similar characteristics. This makes them valuable not only for data augmentation but also for content generation (<xref ref-type="bibr" rid="B24">Hussain et al., 2020</xref>) and anomaly detection (<xref ref-type="bibr" rid="B59">Yang et al., 2022</xref>). Regarding data augmentation, the synthetic data generated by these models can be combined with the original data, resulting in a larger and diversified dataset for training ML models.</p>
<p>Several studies have explored the application of generative models in analyzing human movement data, highlighting the potential of generative models in the biomechanical domain. Researchers have developed (<xref ref-type="bibr" rid="B52">Takeishi and Kalousis, 2021</xref>) a generative model for the human gait that ensures physically realistic outputs by integrating a VAE with a differentiable physics engine, demonstrating its efficacy in gait style transfer. Similarly, <xref ref-type="bibr" rid="B34">Liu et al. (2020)</xref> employed a conditional GAN to replicate the kinematic attributes of individuals with lateral collateral ligament injuries in their feet and ankles. Additionally, <xref ref-type="bibr" rid="B36">Luo and Tjahjadi, (2020)</xref> utilized conditional GANs to create a parametric three-dimensional (3D) model of the human body, including an underlying skeleton, enabling the synthesis of asymmetrical gait samples. Furthermore, <xref ref-type="bibr" rid="B51">Song et al. (2020)</xref> harnessed a Deep Convolutional GAN to create binary images that captured three distinct abnormal gait patterns, encompassing falls, reels, and drags.</p>
<p>Although several studies have emphasized the utility of generative AI in the domain of gait data, only one has addressed posture analysis using 3D spinal computed tomography scans of the lumbar spine (<xref ref-type="bibr" rid="B22">Huang and Zhang, 2023</xref>). In response to this pressing issue, we explored whether generative AI can bridge the gap in data scarcity by creating synthetic yet realistic stereographic 3D spinal posture data. By leveraging the capabilities of the VAE, we embarked on the task of generating synthetic posture data. The goal is not only to evaluate whether it is possible to train a VAE on posture data and generate synthetic data, but also to scrutinize whether these synthesized postures can be discerned from genuine data by means of Statistical Parametric Mapping (SPM) and a classification task challenging both domain experts and ML classifiers. Furthermore, this study extends beyond data generation. We explored the practical implications of incorporating synthetic data into the learning process. A critical aspect of this inquiry is the use of an autoencoder (AE) for feature learning based on posture data.</p>
<p>AEs are widely used for denoising tasks in clinical biomechanical data. Previous studies (<xref ref-type="bibr" rid="B41">Mohammadian Rad et al., 2018</xref>; <xref ref-type="bibr" rid="B16">Elkholy et al., 2019</xref>) have demonstrated their effectiveness in improving the discriminative capabilities of models. In various domains, it has been observed that feeding features reconstructed by AEs to a discriminative model as input often yields superior accuracy compared with using the original data (<xref ref-type="bibr" rid="B38">Marchi et al., 2015</xref>; <xref ref-type="bibr" rid="B65">Zhao T. et al., 2017</xref>; <xref ref-type="bibr" rid="B53">Tu et al., 2020</xref>). The latent space of the AE proves to be a valuable resource for automatic feature extraction, a technique that has shown significant utility in other studies (<xref ref-type="bibr" rid="B43">Nguyen et al., 2018</xref>; <xref ref-type="bibr" rid="B62">Zaroug et al., 2020</xref>; <xref ref-type="bibr" rid="B58">Yang and Yin, 2021</xref>). For example, by utilizing latent space in conjunction with other ML models, enhanced performance in various tasks has been demonstrated (<xref ref-type="bibr" rid="B19">Hernandez et al., 2020</xref>).</p>
<p>Given the pivotal role of AEs in biomechanical data analysis, enhancing their reconstruction accuracy holds immense value. Consequently, we sought to elucidate whether augmenting the training dataset with generated synthetic postures can lead to reduced reconstruction errors and a more compact feature representation of an AE without sacrificing reconstruction accuracy.</p>
</sec>
<sec sec-type="materials|methods" id="s2">
<title>2 Materials and methods</title>
<p>The comprehensive workflow is outlined in <xref ref-type="fig" rid="F1">Figure 1</xref> for a concise overview. Subsequent sections will furnish detailed insights into each step delineated in the figure.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Overall workflow of the study. The top left image illustrates the DIERS formetric III 4D&#x2122; system&#x2019;s (DIERS International GmbH, Schlangenbad, Germany) measurement procedure (originally from (<xref ref-type="bibr" rid="B14">Dindorf et al., 2022b</xref>), courtesy of DIERS International GmbH). The original data pool is expanded using a Variational Autoencoder (VAE) (upper right) to address sample size limitations for diverse Machine Learning tasks. This is followed by task-specific model development, exemplified here by a deep Autoencoder, utilizing the augmented data (bottom).</p>
</caption>
<graphic xlink:href="fbioe-12-1350135-g001.tif"/>
</fig>
<sec id="s2-1">
<title>2.1 Subjects and data acquisition</title>
<p>In four separate studies, data were collected from 353 participants. Depending on the study design, as outlined in <xref ref-type="table" rid="T1">Table 1</xref>, each subject underwent postural data collection for the spine on one or three distinct days. During each session, an average of 12&#x2013;14 individual images was captured for each subject. This data collection encompassed both healthy individuals and those with various pathologies, such as back pain, spinal fusion, and osteoarthritis. The DIERS formetric III 4D&#x2122; system, specifically DICAM v3.7 analyzing software (DIERS International GmbH, Schlangenbad, Germany), was employed as a non-invasive means of rasterstereography, also known as surface topography (ST). Detailed information regarding the participants&#x2019; characteristics is presented in <xref ref-type="table" rid="T1">Table 1</xref>. This method enables comprehensive spinal measurements across all body planes without requiring invasive radiation-based techniques or extensive preparation.</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Subject characteristics and related trials.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center"/>
<th align="center">Subjects (<italic>n</italic>)</th>
<th align="center">Male (<italic>n</italic>); Female (<italic>n</italic>)</th>
<th align="center">Age (years)</th>
<th align="center">Height (cm)</th>
<th align="center">BMI (kg/m<sup>2</sup>)</th>
<th align="center">Further information</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">Healthy <xref ref-type="table-fn" rid="Tfn1">
<sup>a</sup>
</xref> (asymptomatic)</td>
<td align="center">201</td>
<td align="center">69; 132</td>
<td align="center">41.28 (13.42)</td>
<td align="center">172.51 (8.19)</td>
<td align="center">23.49 (3.21)</td>
<td align="center">18&#x2013;70 years; free of pain; no history of surgery or fracture between C7 and pelvis; no medical or therapeutic treatment (C7- pelvis) last 12 months; no medical or therapeutic treatment due to musculoskeletal problems (musculoskeletal system except C7-pelvis) last 6 months; BMI &#x2264;30.0; gait stability; an age- and sex-accorded walking speed and spinal function as well as an appropriate joint mobility to theoretically be able to perform a physiological gait pattern; WHO register (INT: DRKS00010834)</td>
</tr>
<tr>
<td align="center">Healthy <xref ref-type="table-fn" rid="Tfn2">
<sup>b</sup>
</xref> (asymptomatic)</td>
<td align="center">25</td>
<td align="center">12; 13</td>
<td align="center">34.68 (12.07)</td>
<td align="center">176.28 (8.83)</td>
<td align="center">24.01 (3.45)</td>
<td align="center">Repeated measurements at three points in time; walking without walking aids and pain; no acute or chronic diseases; no pregnancy; BMI &#x3c;30; WHO register (INT: DRKS00014325)</td>
</tr>
<tr style="background-color:#F9F9F9">
<td align="center">Back pain</td>
<td align="center">32</td>
<td align="center">14; 18</td>
<td align="center">44.53 (14.84)</td>
<td align="center">174.00 (11.00)</td>
<td align="center">26.01 (4.79)</td>
<td align="center">Area of pain: 6% thoracic spine (TS), 72% lumbar spine (LS), and 22% TS &#x2b; LS; no acute fractures, walking restraints, or acute/chronic illnesses that prevent safe walking; WHO register (INT: DRKS00013145)</td>
</tr>
<tr>
<td align="center">Spinal fusion</td>
<td align="center">34</td>
<td align="center">20; 14</td>
<td align="center">56.26 (15.40)</td>
<td align="center">171.00 (11.00)</td>
<td align="center">26.95 (4.43)</td>
<td align="center">Spinal fusion somewhere between C7 and L5; no acute fractures, walking restraints, or acute/chronic illnesses that prevent safe walking; WHO register (INT: DRKS00013145)</td>
</tr>
<tr style="background-color:#F9F9F9">
<td align="center">Osteoarthritis</td>
<td align="center">60</td>
<td align="center">29; 31</td>
<td align="center">64.00 (11.27)</td>
<td align="center">171.00 (9.15)</td>
<td align="center">25.68 (2.35)</td>
<td align="center">30 knee osteoarthritis and 30 hip osteoarthritis; walking without walking aids; no walking impairments that prevent safe walking; no acute or chronic diseases; no pelvic or spinal surgery; no pregnancy; BMI &#x3c;30; WHO register (INT: DRKS00017240)</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn id="Tfn1">
<label>
<sup>a</sup>
</label>
<p>The dataset is part of the dissertation project of Janine Huthwelker. For more details see (<xref ref-type="bibr" rid="B25">Huthwelker et al., 2023</xref>).</p>
</fn>
<fn id="Tfn2">
<label>
<sup>b</sup>
</label>
<p>The dataset is part of the dissertation project of Friederike Werthmann.</p>
</fn>
<fn>
<p>Abbreviations: BMI: body mass index, SD: standard deviations, WHO: world health organization, TS: thoracic spine, LS: lumbar spine, C: cervical, L: lumbar.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>We utilized fifty-four static parameters from the system, including measurements such as pelvic obliquity (&#xb0;), pelvic inclination (dimples) (&#xb0;), pelvic rotation (&#xb0;), as well as the orientation of VP, T1&#x2013;12, and L1&#x2013;L4 in all planes (&#xb0;), as part of our modeling process. <xref ref-type="sec" rid="s12">Supplementary Table S1</xref> provides a comprehensive description of these parameters.</p>
<p>Subsequently, for each participant, we randomly selected three samples without replacement for further calculations. We employed the isolation forest technique (500 trees) to effectively identify and address multivariate outliers. This approach has been demonstrated to be effective in various studies involving kinematic data (<xref ref-type="bibr" rid="B12">Dindorf et al., 2021b</xref>; <xref ref-type="bibr" rid="B60">Yee et al., 2021</xref>). Consequently, from our initial dataset of 1059 samples, we removed 66 outliers using this method, resulting in a final total of 993 samples, derived from 338 subjects for further analysis.</p>
<p>Although multiple classes of healthy subjects and pathologies were present (<xref ref-type="table" rid="T1">Table 1</xref>), a single VAE was trained using all the available data. This decision was based on several key considerations.</p>
<p>Insufficient sample sizes were available for each individual class, making it impractical to effectively train separate VAEs for each class.</p>
<p>Previous studies have highlighted the difficulty of discriminating between respective classes, such as distinguishing healthy postures from pathological ones, using ML classifiers (<xref ref-type="bibr" rid="B12">Dindorf et al., 2021b</xref>). This suggests that there is limited class-specific information that can be exploited.</p>
<p>Opting for a single VAE offers the advantage of capturing shared patterns and common features that potentially exist across various classes. This approach aims to uncover the underlying similarities that might be overlooked by class-specific models.</p>
<p>By employing a single VAE, the model was designed to learn a universal latent space that remained independent of class labels. This allowed the model to focus on extracting general representations that were common to all classes without being biased by class-specific distinctions.</p>
</sec>
<sec id="s2-2">
<title>2.2 General workflow and evaluation procedure</title>
<p>Model development, training, and evaluation of the VAE and AE were integrated into a grouped k-fold cross-validation process (k &#x3d; 5). In each cross-validation fold, the data underwent random partitioning, with approximately 70% assigned to training, 10% to validation, and 20% to testing (the proportion of test data for each fold is given by k &#x3d; 5). It was ensured that subject-specific data, considering multiple measurements per subject, remained separate across the sets. The corresponding specific workflow is illustrated in <xref ref-type="fig" rid="F2">Figure 2</xref>. The utilization of grouped k-fold splitting, a method that prevents subject-specific data from being concurrently included in the training, validation, and test sets, offers several advantages. This approach facilitates improved hyperparameter tuning and early detection of overfitting. Furthermore, this method enhances the robustness of the model evaluation by considering the variability across different training instances. Additionally, by ensuring that subject-specific data are not mixed across the training and evaluation sets, it becomes possible to assess how well the models can be generalized to new, previously unseen subjects or data points, thereby providing a more comprehensive evaluation of the model&#x2019;s performance. The steps pertaining to this workflow are described in detail in the following sections.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Workflow of the generation, testing and evaluation of the synthetic data. RTSD &#x3d; dataset with 50% real, 50% synthetic data; RT3SD &#x3d; dataset with 25% real, 75% synthetic data; MSE &#x3d; Mean Squared Error; SPM &#x3d; Statistical Parametric Mapping; AE &#x3d; Autoencoder; VAE &#x3d; Variational Autoencoder.</p>
</caption>
<graphic xlink:href="fbioe-12-1350135-g002.tif"/>
</fig>
</sec>
<sec id="s2-3">
<title>2.3 VAE implementation</title>
<p>For data generation in our study, we opted for a VAE over a GAN for several compelling reasons. GANs typically require a more extensive and diverse dataset to perform effectively. They thrive when presented with substantial amounts of data that capture intricate patterns and nuances. GANs are sensitive to hyperparameter choices and can suffer from issues such as mode collapse (<xref ref-type="bibr" rid="B49">Saxena and Cao, 2022</xref>). In this case, the posture data were not sufficiently extensive to fully harness the potential of the GAN. Our preliminary study, which involved exploratory work with the available posture data, confirmed that the VAE outperformed GANs when considering our dataset in terms of both the data quality and stability observed during the training process.</p>
<p>A VAE is an artificial neural network employed for generative tasks. It functions by encoding the input data into a lower-dimensional latent space and then decoding it back into the original data space. The key innovation of a VAE is its ability to model probability distributions in a latent space, allowing it to generate new similar data samples by sampling from these distributions. This makes VAEs particularly useful for tasks such as data generation, denoising, and representation learning. In short, the general information flow in a VAE can be described as follows (please refer to (<xref ref-type="bibr" rid="B64">Zhao S. et al., 2017</xref>) for a detailed description):</p>
<p>The encoder takes input data <italic>x</italic> and produces parameters for a probability distribution over the latent space. Let <italic>z</italic> be the latent variable, <italic>q</italic> (<italic>z</italic>&#x2223;<italic>x</italic>) is the approximate posterior distribution, <italic>p</italic>(<italic>z</italic>) is the prior distribution (usually a standard Gaussian), and <italic>&#x3bc;</italic>(<italic>x</italic>) and <italic>&#x3c3;</italic>(<italic>x</italic>) are the mean and standard deviation predicted by the encoder. The latent variable z is sampled from the distribution:<disp-formula id="equ1">
<mml:math id="m1">
<mml:mrow>
<mml:mi>Z</mml:mi>
<mml:mo>&#x223c;</mml:mo>
<mml:mi mathvariant="normal">N</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>&#x3bc;</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>,</mml:mo>
<mml:mi>&#x3c3;</mml:mi>
<mml:msup>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:msup>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
</p>
<p>The decoder takes the sampled latent variable <italic>z</italic> and reconstructs the input data <italic>x</italic>. The conditional distribution of the data given the latent variable is modeled as <italic>p</italic> (<italic>x</italic>&#x2223;<italic>z</italic>). The reconstructed data <italic>x&#x302;</italic> is sampled from this distribution.</p>
<p>The training objective for a VAE is based on the Evidence Lower Bound (ELBO), which is defined as follows:<disp-formula id="equ2">
<mml:math id="m2">
<mml:mrow>
<mml:mtext>ELBO</mml:mtext>
<mml:mo>&#x3d;</mml:mo>
<mml:msub>
<mml:mi mathvariant="normal">E</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>q</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>z</mml:mi>
<mml:mo>&#x2223;</mml:mo>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="|">
<mml:mrow>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>p</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>x</mml:mi>
<mml:mo>&#x2223;</mml:mo>
<mml:mi>z</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mtext>&#x2009;KL</mml:mtext>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="|">
<mml:mrow>
<mml:mi>q</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>z</mml:mi>
<mml:mo>&#x7c;</mml:mo>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2225;</mml:mo>
<mml:mi>p</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="|">
<mml:mrow>
<mml:mi>z</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</disp-formula>
</p>
<p>The first term is the reconstruction term, encouraging the model to generate data similar to the input. The second term is the regularization term, penalizing the divergence between the learned latent distribution <italic>q</italic> (<italic>z</italic>&#x2223;<italic>x</italic>) and the prior distribution <italic>p</italic>(<italic>z</italic>).</p>
<p>The information flows from the input data through the encoder to the latent space, and then from the latent space through the decoder to reconstruct the data. The objective during training is to maximize the ELBO, thereby encouraging the model to learn a useful latent representation of the input data.</p>
<p>It aims at a smaller latent dimension than the original number of features to capture the most important features and reduce the complexity of data representation by learning a more compact representation of the data. Furthermore, this proved useful because the smaller latent dimensions acted as a form of regularization, preventing the VAE from overfitting the training data (<xref ref-type="bibr" rid="B37">Mahmud et al., 2020</xref>). In addition, it has been suggested that when the latent dimension is smaller, the decoder must generate data with fewer degrees of freedom, which can lead to more coherent and structured generated samples (<xref ref-type="bibr" rid="B66">Zhao et al., 2019</xref>).</p>
<p>To determine the model architecture, a grid hyperparameter search was performed based on the accuracy of the combined losses (reconstruction loss and KL divergence loss) in the validation set. We varied the latent vector length (5, 10, 15, and 20), two hidden layer sizes for the encoder and decoder (54, 108, 256, and 500), batch size (32, 64, and 128), learning rate (0.01, 0.001, and 0.0001), and number of epochs (200, 400, 600, and 1,000). The VAE model employs an Adam optimizer to minimize the combined loss function. Based on each training set, scaling was applied using StandardScaler from Scikit-learn (<xref ref-type="bibr" rid="B46">Pedregosa et al., 2011</xref>). The final model has the following configuration:</p>
<p>The encoder network operated on the input posture data (shape: 54) through two dense layers with specific sizes of 256 and 108, utilizing both Rectified Linear Unit (ReLU) activation functions. These layers reduced the input data to a latent space of 15 dimensions. This is followed by a symmetric decoder section comprising two corresponding dense layers, both employing ReLU activation, and an additional final layer employing linear activation. The epochs were set to 400 with a learning rate of 0.001, and a batch size of 128. For a visual representation of the architecture of the VAE please refer to <xref ref-type="fig" rid="F3">Figure 3</xref>.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Visualization of the VAE architecture in the current study. The values in the white boxes represent the layer sizes.</p>
</caption>
<graphic xlink:href="fbioe-12-1350135-g003.tif"/>
</fig>
<p>Although the intermediate losses employed during VAE training are pivotal for the training process, they may not be as informative or comparable. Instead, we report the Mean Squared Error (MSE) to evaluate the reconstruction errors and conduct model comparisons.</p>
</sec>
<sec id="s2-4">
<title>2.4 Evaluation synthetic data</title>
<p>To evaluate the distinguishability of synthetic data from real data, we adopted three distinct approaches: (a) judgment by domain experts, (b) implementation of an ML classifier, and (c) statistical evaluation using SPM.</p>
<p>First, we generated synthetic data for each VAE model during cross-validation of the required size (see below). Therefore, random sampling from a standard Gaussian distribution was performed to generate latent vectors. These latent vectors are then passed through the decoder component of the trained VAE model. Subsequently, we combined the original data from the test set with synthetic data, enabling us to perform the identification tasks denoted as (a), (b), and (c). For expert-based evaluation (a) and SPM analysis (c), we rescaled the feature values to match the scale and distribution of the original data. This was done to ensure that the experts could assess the data in an accustomed manner while preserving the fidelity of their evaluation process:</p>
<p>a) In the expert-based evaluation, we opted for a random subset of 100 real and 100 synthetic data samples because a comprehensive assessment was economically infeasible due to constraints on the experts. Therefore, for each fold, we randomly selected and combined ten real samples from the test set with ten synthetic samples generated by the respective VAE model. Each sample underwent an independent evaluation by three experts, and the final expert-based classification was determined via a majority vote. These experts possessed extensive experience working with spinal data and were familiar with the dataset. During the evaluation, the data were presented visually, similar to the illustration in <xref ref-type="fig" rid="F4">Figure 4</xref>. The expert ratings were organized, and the accuracy for each rater and across all ratings was calculated using MATLAB (MathWorks, Natick, Massachusetts, United States). The loose majority voting was calculated based on (<xref ref-type="bibr" rid="B3">Ballabio et al., 2019</xref>). Fleiss&#x2019; kappa was calculated using the SPSS software (IBM, Armonk, New York, United States).</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Visual comparison of 50 exemplary real (black line) and 50 exemplary synthetic (red line) data samples. Data are rescaled to original feature space.</p>
</caption>
<graphic xlink:href="fbioe-12-1350135-g004.tif"/>
</fig>
<p>b) We conducted a supervised classification task to discriminate between real (all test set samples) and synthetic samples equal in size to the test set. To achieve this, we employed a k-nearest neighbour classifier with k &#x3d; 10. The other parameters were set to the default scikit-learn parameters (<xref ref-type="bibr" rid="B46">Pedregosa et al., 2011</xref>). To gauge the effectiveness of the classifier in distinguishing between the two data types, we leveraged the cross-validation accuracy score derived from a 5-fold cross-validation procedure.</p>
<p>c) For further evaluation of the synthetic data, based on (<xref ref-type="bibr" rid="B6">Bicer et al., 2022</xref>), the statistical difference between the synthetic and real data for each vertebra in each anatomical plane was compared employing a non-parametric 1D two-tailed unpaired <italic>t</italic>-test (<italic>&#x3b1;</italic> &#x3d; 0.05) using the spm1d package (Pataky et al., 2013) in MATLAB. To this end, in the actual dataset, a single sample was randomly chosen for each subject. A synthetic dataset of equal size (n &#x3d; 338) was created by randomly selecting synthetic samples generated during the cross-validation folds.</p>
</sec>
<sec id="s2-5">
<title>2.5 Use case evaluation AE</title>
<p>For the present study&#x2019;s use case evaluation, the primary emphasis is dimensionality reduction. We do not focus on the generative capabilities or probabilistic modeling offered by VAEs. Our objective is to establish a deterministic mapping from the input data to a latent representation, ensuring that similar input data points are consistently mapped to similar points in the latent space without introducing any randomness. To satisfy these criteria, we chose to utilize an AE because, first, it does not introduce a probabilistic element that could result in variations within the latent-space representations. Second, AEs are simpler to implement and incur less computational overhead. Unlike VAEs, AEs do not require complex probabilistic modeling or variational inference techniques.</p>
<p>We evaluated the potential usefulness of posture data artificially created using the VAE for the training of the AE in three different scenarios:<list list-type="simple">
<list-item>
<p>&#x2022; Utilizing only the unaugmented data as training data, referred to as <italic>RTD</italic> (100% real training data).</p>
</list-item>
<list-item>
<p>&#x2022; Employing the real data combined with synthetic data in equal proportions in the training dataset, denoted as <italic>RTSD</italic> (50% real, 50% synthetic).</p>
</list-item>
<list-item>
<p>&#x2022; Expanding the real data with synthetic data three times its size, labelled as <italic>RT3SD</italic> (25% real, 75% synthetic).</p>
</list-item>
</list>
</p>
<p>For augmented data generation, we randomly selected one trained VAE model that resulted from the cross-validation process and created synthetic data of the respective sizes, as described in the previous section.</p>
<p>The AE was trained during grouped k-fold cross-validation, similar to the training of the VAE (k &#x3d; 5), to assess how well the AE could generalize its learned representations to new, previously unseen subjects. Scaling was applied based on each training set (without synthetic data) using StandardScaler from Scikit-learn.</p>
<p>Similar to VAE, a grid hyperparameter search guided by the validation set accuracy using unaugmented data was performed. The latent dimension was set to be equal to that of the VAE, and the number of hidden layers was set to three. The hidden layer sizes (25, 50, 100, 250, and 500) of the encoder and decoder, batch sizes (32, 64, and 128), and learning rates (0.01, 0.001, and 0.0001) were varied. Early stopping was integrated into the training procedure, which involved monitoring the validation loss and restoring the best weights when necessary, with a patience setting of 10 epochs and a maximum of 1,000 epochs. This approach led to the final deep AE configuration as follows:</p>
<p>The model was structured with an encoder section featuring three dense layers (500, 250, and 50 units in the first, second, and third layers, respectively), which collectively reduced the input data into a 15-dimensional latent space. This was followed by a symmetric decoder section consisting of three corresponding dense layers. All of these layers utilize ReLU activation functions, except for the final layer of the encoder and decoder, which employs a linear activation function. To train the AE, we employed the MSE loss function in combination with the Adam optimizer (learning rate &#x3d; 0.001) and a batch size of 64.</p>
<p>Finally, we explored the potential for reducing the latent dimension while maintaining the same reconstruction accuracy as in the unaugmented data by augmenting the training data while preserving other hyperparameters. This exploration was guided by a manual search procedure that considered the accuracy of the validation set.</p>
</sec>
<sec id="s2-6">
<title>2.6 Statistics and further calculations</title>
<p>Modeling was implemented using the TensorFlow (<xref ref-type="bibr" rid="B1">Abadi et al., 2016</xref>) and Keras (<xref ref-type="bibr" rid="B9">Chollet, 2015</xref>) frameworks. Visualization was performed employing matplotlib (<xref ref-type="bibr" rid="B23">Hunter, 2007</xref>). Visual exploration of the latent space was performed with Uniform Manifold Approximation and Projection for Dimension Reduction (UMAP) (<xref ref-type="bibr" rid="B40">McInnes et al., 2018</xref>).</p>
</sec>
</sec>
<sec sec-type="results" id="s3">
<title>3 Results</title>
<sec id="s3-1">
<title>3.1 VAE and synthetic data evaluation</title>
<p>The reconstruction errors of the trained VAE are listed in <xref ref-type="table" rid="T2">Table 2</xref>. Subsequently, the trained VAE was employed to generate synthetic data. Both generated synthetic data samples as well as real posture data samples are visually presented and compared alongside each other in <xref ref-type="fig" rid="F4">Figure 4</xref>. Notably, there were no discernible systematic differences between the real and synthetic data when viewed visually. This was also statistically confirmed by the SPM, which showed that the difference between the real and synthetic data was not significant for any vertebra (<xref ref-type="fig" rid="F5">Figure 5</xref>).</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>MSE results for the VAE and AE for each cross-validation fold, as well as mean and SD (bold values) over all folds. Data: RTD &#x3d; 100% real training data; RTSD &#x3d; 50% real, 50% synthetic; RT3SD &#x3d; 25% real, 75% synthetic.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th rowspan="3" align="center">Fold</th>
<th rowspan="2" colspan="3" align="center">VAE</th>
<th colspan="6" align="center">AE (latent dim. 15)</th>
<th colspan="6" align="center">AE (latent dim. 7)</th>
</tr>
<tr>
<th colspan="2" align="center">RTD</th>
<th colspan="2" align="center">RTSD</th>
<th colspan="2" align="center">RT3SD</th>
<th colspan="2" align="center">RTD</th>
<th colspan="2" align="center">RTSD</th>
<th colspan="2" align="center">RT3SD</th>
</tr>
<tr>
<th align="center">Train MSE</th>
<th align="center">Val. MSE</th>
<th align="center">Test MSE</th>
<th align="center">Train MSE</th>
<th align="center">Test MSE</th>
<th align="center">Train MSE</th>
<th align="center">Test MSE</th>
<th align="center">Train MSE</th>
<th align="center">Test MSE</th>
<th align="center">Train MSE</th>
<th align="center">Test MSE</th>
<th align="center">Train MSE</th>
<th align="center">Test MSE</th>
<th align="center">Train MSE</th>
<th align="center">Test MSE</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">1</td>
<td align="center">0.01</td>
<td align="center">0.14</td>
<td align="center">0.15</td>
<td align="center">0.02</td>
<td align="center">0.17</td>
<td align="center">0.01</td>
<td align="center">0.01</td>
<td align="center">0.01</td>
<td align="center">0.01</td>
<td align="center">0.17</td>
<td align="center">0.34</td>
<td align="center">0.10</td>
<td align="center">0.07</td>
<td align="center">0.05</td>
<td align="center">0.06</td>
</tr>
<tr>
<td align="center">2</td>
<td align="center">0.01</td>
<td align="center">0.13</td>
<td align="center">0.13</td>
<td align="center">0.07</td>
<td align="center">0.19</td>
<td align="center">0.01</td>
<td align="center">0.02</td>
<td align="center">0.01</td>
<td align="center">0.02</td>
<td align="center">0.18</td>
<td align="center">0.33</td>
<td align="center">0.07</td>
<td align="center">0.08</td>
<td align="center">0.11</td>
<td align="center">0.12</td>
</tr>
<tr>
<td align="center">3</td>
<td align="center">0.01</td>
<td align="center">0.11</td>
<td align="center">0.13</td>
<td align="center">0.04</td>
<td align="center">0.18</td>
<td align="center">0.01</td>
<td align="center">0.02</td>
<td align="center">0.01</td>
<td align="center">0.01</td>
<td align="center">0.18</td>
<td align="center">0.30</td>
<td align="center">0.08</td>
<td align="center">0.08</td>
<td align="center">0.10</td>
<td align="center">0.09</td>
</tr>
<tr>
<td align="center">4</td>
<td align="center">0.01</td>
<td align="center">0.10</td>
<td align="center">0.12</td>
<td align="center">0.01</td>
<td align="center">0.15</td>
<td align="center">0.01</td>
<td align="center">0.01</td>
<td align="center">0.01</td>
<td align="center">0.01</td>
<td align="center">0.14</td>
<td align="center">0.35</td>
<td align="center">0.09</td>
<td align="center">0.08</td>
<td align="center">0.06</td>
<td align="center">0.06</td>
</tr>
<tr>
<td align="center">5</td>
<td align="center">0.01</td>
<td align="center">0.18</td>
<td align="center">0.11</td>
<td align="center">0.01</td>
<td align="center">0.14</td>
<td align="center">0.01</td>
<td align="center">0.09</td>
<td align="center">0.01</td>
<td align="center">0.09</td>
<td align="center">0.17</td>
<td align="center">0.29</td>
<td align="center">0.03</td>
<td align="center">0.39</td>
<td align="center">0.04</td>
<td align="center">0.37</td>
</tr>
<tr>
<td align="center">
<bold>Mean</bold>
</td>
<td align="center">
<bold>0.01</bold>
</td>
<td align="center">
<bold>0.13</bold>
</td>
<td align="center">
<bold>0.13</bold>
</td>
<td align="center">
<bold>0.03</bold>
</td>
<td align="center">
<bold>0.17</bold>
</td>
<td align="center">
<bold>0.01</bold>
</td>
<td align="center">
<bold>0.03</bold>
</td>
<td align="center">
<bold>0.01</bold>
</td>
<td align="center">
<bold>0.03</bold>
</td>
<td align="center">
<bold>0.17</bold>
</td>
<td align="center">
<bold>0.32</bold>
</td>
<td align="center">
<bold>0.07</bold>
</td>
<td align="center">
<bold>0.14</bold>
</td>
<td align="center">
<bold>0.08</bold>
</td>
<td align="center">
<bold>0.14</bold>
</td>
</tr>
<tr>
<td align="center">
<bold>SD</bold>
</td>
<td align="center">
<bold>0.00</bold>
</td>
<td align="center">
<bold>0.03</bold>
</td>
<td align="center">
<bold>0.01</bold>
</td>
<td align="center">
<bold>0.03</bold>
</td>
<td align="center">
<bold>0.02</bold>
</td>
<td align="center">
<bold>0.00</bold>
</td>
<td align="center">
<bold>0.03</bold>
</td>
<td align="center">
<bold>0.00</bold>
</td>
<td align="center">
<bold>0.04</bold>
</td>
<td align="center">
<bold>0.02</bold>
</td>
<td align="center">
<bold>0.02</bold>
</td>
<td align="center">
<bold>0.03</bold>
</td>
<td align="center">
<bold>0.14</bold>
</td>
<td align="center">
<bold>0.03</bold>
</td>
<td align="center">
<bold>0.13</bold>
</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>SPM results displayed for each anatomical plane. The blue line and area each represent the mean and SD of the real data, while the red line corresponds to synthetic data. The red dotted lines indicate the critical t-values, signifying the absence of significant differences between them. Additionally, the black line depicts the t-values observed for each vertebra.</p>
</caption>
<graphic xlink:href="fbioe-12-1350135-g005.tif"/>
</fig>
<p>The results of the ML and expert-based evaluations assessing the separability of real and synthetically generated posture data using the VAE are presented in <xref ref-type="table" rid="T3">Table 3</xref>. Both the ML classifier and human experts struggled to accurately distinguish between synthetic and real data, with experts exhibiting a notably poorer performance than the ML classifier.</p>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>Separability, given as classification results of real and synthetic posture data, comparing ML and human experts&#x2019; performance.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th rowspan="2" align="center">Accuracy</th>
<th rowspan="2" align="left"/>
<th colspan="2" align="center">ML evaluation</th>
<th colspan="2" align="center">Human experts&#x2019; evaluation</th>
</tr>
<tr>
<th colspan="2" align="center">66.53% &#xb1; 2.72%</th>
<th colspan="2" align="center">52.17% (<italic>&#x3ba;</italic> &#x3d; .073)</th>
</tr>
<tr>
<th align="left"/>
<th align="left"/>
<th align="center">Actual real</th>
<th align="center">Actual synthetic</th>
<th align="center">Actual real</th>
<th align="center">Actual synthetic</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td rowspan="2" align="left">Confusion Matrix</td>
<td align="center">Predicted real</td>
<td align="center">562</td>
<td align="center">234</td>
<td align="center">160</td>
<td align="center">147</td>
</tr>
<tr>
<td align="center">Predicted synthetic</td>
<td align="center">431</td>
<td align="center">759</td>
<td align="center">140</td>
<td align="center">153</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The first rater&#x2019;s accuracy was 52.00%, the second one achieved 51.00%, and the third rater rated 53.50% of all cases correctly. The interrater reliability was calculated as &#x03BA; &#x3d; .073, indicating only slight agreement between the raters (<xref ref-type="bibr" rid="B31">Landis and Koch, 1977</xref>). The loose majority vote (50%) shows that the data were more often rated as real (real &#x3d; 307, synthetic &#x3d; 293).</p>
</sec>
<sec id="s3-2">
<title>3.2 Use case evaluation AE</title>
<p>The reconstruction errors for the AE and the real and augmented datasets are listed in <xref ref-type="table" rid="T2">Table 2</xref>. An evident enhancement of more than five times in the accuracy of the test set reconstruction becomes strikingly apparent when the training data are expanded with synthetic data using the VAE. This improvement was particularly prominent when using synthetic data of equal proportions in training (RTSD). Extending the original data to three times its size (RT3SD) only slightly reduced the test-set reconstruction error.</p>
<p>The impact of this augmentation on the reconstruction quality becomes apparent when visually comparing the performance of the AE with and without the inclusion of synthetic data. This comparison demonstrates the superior reconstruction with the augmented dataset (see <xref ref-type="fig" rid="F6">Figure 6</xref>).</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>Comparison between actual (represented by black lines with markers) and reconstructed data. The blue lines show reconstructions using solely the original training data, while the orange lines denote reconstructions based on training data augmented by synthetic data (RTSD) generated through the VAE for four exemplary subjects. Data are rescaled to the original feature space.</p>
</caption>
<graphic xlink:href="fbioe-12-1350135-g006.tif"/>
</fig>
<p>Explorative reduction of the latent dimension from 15 to seven, while keeping the other hyperparameters constant, led to a slightly better reconstruction performance of the AE when training with the augmented data compared to training only on the unaugmented data with a latent space of 15. In contrast, when using only the unaugmented data RTD with seven latent dimensions, the performance deteriorated significantly.</p>
<p>Visual exploration of the latent space using UMAP (<xref ref-type="bibr" rid="B40">McInnes et al., 2018</xref>) (<xref ref-type="fig" rid="F7">Figure 7</xref>) shows no clearly visible clusters and no clear grouping of the datasets used for the study (healthy, back pain, spinal fusion, osteoarthritis).</p>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>Latent space visualization using UMAP (<xref ref-type="bibr" rid="B40">McInnes et al., 2018</xref>) for a latent dimension of 15 and training using the augmented data RT3SD. The color code represents the class membership according to the datasets used (see <xref ref-type="table" rid="T1">Table 1</xref>).</p>
</caption>
<graphic xlink:href="fbioe-12-1350135-g007.tif"/>
</fig>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<title>4 Discussion</title>
<p>This study addresses a critical issue in the field of biomechanics: scarcity of data for the development of ML models. Our exploration of the use of generative AI to generate synthetic posture data offers promising insights into how limited data challenges can be mitigated and how biomechanical ML can be enhanced.</p>
<p>The promising results regarding loss reduction, as well as the low MSE values for data reconstruction, indicate the VAE&#x2019;s ability to capture the underlying features of the data distribution and show that it is generally possible to develop a VAE model on posture data. Our results align with those of recent biomechanical studies that have successfully applied VAEs to capture essential data distribution features (<xref ref-type="bibr" rid="B22">Huang and Zhang, 2023</xref>; <xref ref-type="bibr" rid="B29">Kneifl et al., 2023</xref>).</p>
<p>Addressing the quality of synthetic data is of pivotal concern when it is applied to ML tasks. The synthetic data closely mirror the characteristics of the real data (<xref ref-type="bibr" rid="B50">Sharifi Renani et al., 2021</xref>). However, evaluating the quality of the synthetic data in the absence of a definitive benchmark dataset is challenging. Although various quantitative metrics have been suggested (<xref ref-type="bibr" rid="B67">Zhou et al., 2019</xref>), their applicability in the biomechanical context remains limited (<xref ref-type="bibr" rid="B6">Bicer et al., 2022</xref>). To overcome this challenge, we adopted a comprehensive evaluation approach for synthetic data, encompassing both objective assessments through ML classification and SPM and subjective evaluations through expert ratings.</p>
<p>Visually, the synthetic data closely resemble the real data. On a statistical basis, employing SPM, no discernible differences were detected between the real and synthetic data. Moreover, when evaluated by both ML classifiers and domain experts, distinguishing between real and synthetically generated posture data proved highly challenging. The experts exhibited minimal-to-negligible consensus, underscoring the inherent challenges of such assessments. This multifaceted evaluation collectively indicates that the synthetic data generated by the VAE exhibit a high level of quality and maintain consistency with the real-world posture data. Consequently, it can be concluded that the proposed VAE is highly effective for generating synthetic posture data that accurately emulate real data.</p>
<p>Incorporating synthetically generated posture data into the ML process, here with the use case example of an AE, yielded notable improvements in training and test set reconstruction accuracy. This is in line with several studies that demonstrated that AE benefits from larger datasets (<xref ref-type="bibr" rid="B63">Zhao et al., 2015</xref>). When incorporating synthetic data, a remarkable improvement in the accuracy of the test set reconstruction became evident, with a more than seven-fold reduction in the test set MSE compared with using unaugmented data for training. These results suggest that augmenting the training data for training an AE with synthetic examples by means of a VAE not only enhances the model&#x2019;s ability to reconstruct the data it was trained on but also improves its generalization to unseen test data, which has also been reported in other works (<xref ref-type="bibr" rid="B57">Wan et al., 2017</xref>; <xref ref-type="bibr" rid="B30">Kornish et al., 2018</xref>).</p>
<p>It is important to note that alternative approaches to data augmentation have the potential to enhance model performance when dealing with limited data. For instance, transfer learning, an ML technique, allows a model to leverage the knowledge gained from a previous task to enhance its generalizability to a new task. Transfer learning compensates for the scarcity of labeled data by transferring knowledge from other well-labeled data sources. To address the shortage of abnormal gait data, researchers have (<xref ref-type="bibr" rid="B44">Pandit et al., 2019</xref>; <xref ref-type="bibr" rid="B39">Martinez and Leon, 2020</xref>) employed various neural networks pretrained on extensive datasets. One approach of interest could involve combining transfer learning with subsequent training on augmented data.</p>
<p>Visual exploration of the latent space revealed a notable absence of distinct clusters and clear groupings among the datasets used in this study, encompassing postures categorized as healthy and those associated with back pain, spinal fusion, and osteoarthritis. This finding underlines the challenges in discriminating between healthy and pathological postures, a hurdle that previous research has highlighted when employing ML classifiers without the benefit of feature learning techniques (<xref ref-type="bibr" rid="B12">Dindorf et al., 2021b</xref>). Considering these challenges, the findings of this study are comprehensible and contribute to the current state of research by demonstrating that even the application of feature learning through an AE does not yield a discernible enhancement in discriminability.</p>
<p>Notably, our VAE was not trained separately for each class for the aforementioned reasons, which may have resulted in a mixed latent space in which class-specific information was not well separated. Consequently, class-specific discriminative characteristics may not be as pronounced in the synthetic data, potentially impeding the formation of discernible clusters. Future research should consider including dynamic movement data from the spine as a promising direction. The dynamic aspects of posture and movement could potentially offer more distinctive class differences, potentially facilitating the identification of clusters, since there is significant inter-subject variability in spine movement, for example, during gait (<xref ref-type="bibr" rid="B48">Prost et al., 2021</xref>). In the context of distinguishing between biological sexes, recent findings have indicated a significant improvement in classification accuracy when utilizing dynamic data as opposed to relying solely on static data (<xref ref-type="bibr" rid="B13">Dindorf et al., 2021c</xref>). This highlights the potential of using dynamic data to enhance the accuracy of classification models for specific applications.</p>
<p>Although our research has yielded promising insights into the use of generative AI to address data scarcity in biomechanical ML, it is crucial to acknowledge several limitations that should be considered when interpreting the results and planning future studies. Despite the favorable results in distinguishing synthetic data from real data, it is important to mention that there may still be subtle differences between the two. Synthetic data, although visually and quantitatively similar, may not capture all of the intricacies of real-world biomechanical postures, potentially leading to limitations in specific applications where extreme precision is required.</p>
<p>This study primarily relied on a specific dataset obtained from a particular group of subjects via surface topography. The effectiveness of the generative AI approach may vary when applied to different biomechanical datasets or to data collected using diverse measurement techniques. The ability of the model to be generalized to broader and more diverse populations requires further investigation.</p>
<p>Although our results demonstrate the benefits of augmenting the training dataset with synthetic data, the optimal balance between real and synthetic data remains an open question. The study could only show that, with the current AE, expanding the real data with synthetic data to three times their size (RT3SD) slightly improved the reconstruction performance compared to real data combined with synthetic data of equal proportions (RTSD). Further research is required to explore the potential impacts of varying proportions of synthetic data.</p>
<p>The use of synthetic data in healthcare raises ethical concerns. On one hand, it mitigates privacy risks by minimizing the demand for additional patient data, thereby reducing the risk of data breaches. However, synthetic data may not fully represent the complexities of actual patient data, potentially leading to biased or inaccurate outcomes. The extent to which accountability applies, in this case, must be discussed in a context-specific manner.</p>
<p>Future directions may involve extending the application of generative AI to other biomechanical domains such as dynamic spinal data. An increase in the volume of accessible posture data has the potential to significantly enhance the applicability of GANs. Therefore, a future comparative analysis between GANs and the approach presented in this study, if feasible, is considered important. Additionally, investigating the impact of synthetic data on various ML architectures or distinct tasks, such as regression or classification, is a promising area of research. In the context of gait data, deep generative models combined with differentiable physics engines have been proposed to ensure that the generated data are in line with physical laws (physically informed modeling) (<xref ref-type="bibr" rid="B52">Takeishi and Kalousis, 2021</xref>). The adoption of this methodology in the context of posture data could ensure the realism of the generated data and should be evaluated in future studies. Furthermore, an intriguing direction for future research could be the exploration of an extended VAE that conditions data generation or reconstruction on additional information, such as class labels, or other attributes, such as biological sex. This exploration is particularly relevant as existing studies highlight the presence of biological sex differences in spinal data (<xref ref-type="bibr" rid="B61">Yukawa et al., 2018</xref>; <xref ref-type="bibr" rid="B42">Mohan and Huynh, 2019</xref>; <xref ref-type="bibr" rid="B35">Ludwig et al., 2023</xref>). These models, known as Conditional Variational Autoencoders (CVAEs) (<xref ref-type="bibr" rid="B65">Zhao T. et al., 2017</xref>), can accentuate the class membership, potentially leading to the generation of more realistic posture data by incorporating additional subject characteristics. To the best of our knowledge, this application has not been explored in the biomechanical domain.</p>
</sec>
<sec sec-type="conclusion" id="s5">
<title>5 Conclusion</title>
<p>In summary, our study underscores the potential of generative AI, specifically VAEs, in addressing data-scarcity challenges within the biomechanics field. By generating synthetic posture data that closely mirror real-world observations, our study presents a viable approach for expanding datasets, strengthening model performance, and advancing biomechanical applications.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The datasets for this article are not publicly available due to concerns regarding participant/patient anonymity. Requests to access the datasets should be directed to the corresponding author.</p>
</sec>
<sec id="s7">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the Medical Chamber Rhineland-Palatinate and registered with the WHO (INT: DRKS00010834, DRKS00014325, DRKS00013145, and DRKS00017240). The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec id="s8">
<title>Author contributions</title>
<p>CD: Conceptualization, Formal Analysis, Investigation, Methodology, Software, Validation, Visualization, Writing&#x2013;original draft, Writing&#x2013;review and editing. JD: Conceptualization, Formal Analysis, Investigation, Methodology, Software, Visualization, Writing&#x2013;original draft, Writing&#x2013;review and editing. JKo: Funding acquisition, Investigation, Project administration, Supervision, Writing&#x2013;original draft, Writing&#x2013;review and editing. CW: Investigation, Writing&#x2013;original draft, Writing&#x2013;review and editing. SB: Conceptualization, Writing&#x2013;original draft, Writing&#x2013;review and editing. SS: Writing&#x2013;original draft, Writing&#x2013;review and editing. JH: Data curation, Investigation, Writing&#x2013;review and editing. FW: Data curation, Investigation, Writing&#x2013;review and editing. JKn: Data curation, Investigation, Writing&#x2013;review and editing. PD: Funding acquisition, Project administration, Resources, Writing&#x2013;review and editing. UB: Funding acquisition, Project administration, Resources, Writing&#x2013;review and editing. MF: Funding acquisition, Project administration, Resources, Writing&#x2013;review and editing.</p>
</sec>
<sec sec-type="funding-information" id="s9">
<title>Funding</title>
<p>The author(s) declare financial support was received for the research, authorship, and/or publication of this article. This research received support from the AI Junior Research Funding Program at the University of Kaiserslautern-Landau.</p>
</sec>
<ack>
<p>The authors would like to thank all the participants involved in this study. They also appreciate the support received from their colleagues during the recruitment process. Finally, the authors express their gratitude to Kjell Heitmann, Amira Basic, and Helmut Diers for their multifarious endorsements and technological support.</p>
</ack>
<sec sec-type="COI-statement" id="s10">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s11">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s12">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fbioe.2024.1350135/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fbioe.2024.1350135/full&#x23;supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="Table1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Abadi</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Barham</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Davis</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Dean</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2016</year>). &#x201c;<article-title>{TensorFlow}: a system for {Large-Scale} machine learning</article-title>,&#x201d; in <source>{TensorFlow}: a system for {Large-Scale} machine learning</source>. Editor <person-group person-group-type="editor">
<name>
<surname>Keeton</surname>
<given-names>K.</given-names>
</name>
</person-group> (<publisher-loc>United States</publisher-loc>: <publisher-name>USENIX Association</publisher-name>), <fpage>265</fpage>&#x2013;<lpage>283</lpage>.</citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Alzubaidi</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Bai</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Al-Sabaawi</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Santamar&#xed;a</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Albahri</surname>
<given-names>A. S.</given-names>
</name>
<name>
<surname>Al-dabbagh</surname>
<given-names>B. S. N.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>A survey on deep learning tools dealing with data scarcity: definitions, challenges, solutions, tips, and applications</article-title>. <source>J. Big Data</source> <volume>10</volume> (<issue>1</issue>), <fpage>46</fpage>&#x2013;<lpage>82</lpage>. <pub-id pub-id-type="doi">10.1186/s40537-023-00727-2</pub-id>
</citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ballabio</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Todeschini</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Consonni</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Recent advances in high-level fusion methods to classify multiple analytical chemical data</article-title>. <source>Data Handl. Sci. Technol.</source> <volume>31</volume>, <fpage>129</fpage>&#x2013;<lpage>155</lpage>. <pub-id pub-id-type="doi">10.1016/B978-0-444-63984-4.00005-3</pub-id>
</citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Barnes</surname>
<given-names>K. R.</given-names>
</name>
<name>
<surname>Kilding</surname>
<given-names>A. E.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Strategies to improve running economy</article-title>. <source>Sports Med.</source> <volume>45</volume> (<issue>1</issue>), <fpage>37</fpage>&#x2013;<lpage>56</lpage>. <pub-id pub-id-type="doi">10.1007/s40279-014-0246-y</pub-id>
</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bayer</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Kaufhold</surname>
<given-names>M.-A.</given-names>
</name>
<name>
<surname>Buchhold</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Keller</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Dallmeyer</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Reuter</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Data augmentation in natural language processing: a novel text generation approach for long and short text classifiers</article-title>. <source>Int. J. Mach. Learn Cybern.</source> <volume>14</volume> (<issue>1</issue>), <fpage>135</fpage>&#x2013;<lpage>150</lpage>. <pub-id pub-id-type="doi">10.1007/s13042-022-01553-3</pub-id>
</citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bicer</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Phillips</surname>
<given-names>A. T. M.</given-names>
</name>
<name>
<surname>Melis</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>McGregor</surname>
<given-names>A. H.</given-names>
</name>
<name>
<surname>Modenese</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Generative deep learning applied to biomechanics: a new augmentation technique for motion capture datasets</article-title>. <source>J. Biomech.</source> <volume>144</volume>, <fpage>111301</fpage>. <pub-id pub-id-type="doi">10.1016/j.jbiomech.2022.111301</pub-id>
</citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bzdok</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Altman</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Krzywinski</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Statistics versus machine learning</article-title>. <source>Nat. Methods</source> <volume>15</volume> (<issue>4</issue>), <fpage>233</fpage>&#x2013;<lpage>234</lpage>. <pub-id pub-id-type="doi">10.1038/nmeth.4642</pub-id>
</citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ceyssens</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Vanelderen</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Barton</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Malliaras</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Dingenen</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Biomechanical risk factors associated with running-related injuries: a systematic review</article-title>. <source>Sports Med.</source> <volume>49</volume> (<issue>7</issue>), <fpage>1095</fpage>&#x2013;<lpage>1115</lpage>. <pub-id pub-id-type="doi">10.1007/s40279-019-01110-z</pub-id>
</citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chollet</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2015</year>). <source>Keras</source>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://github.com/fchollet/keras">https://github.com/fchollet/keras</ext-link>
</comment>.</citation>
</ref>
<ref id="B10">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Deng</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Dong</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Socher</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>L.-J.</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Fei-Fei</surname>
<given-names>Li</given-names>
</name>
</person-group> (<year>2009</year>). &#x201c;<article-title>ImageNet: a large-scale hierarchical image database</article-title>,&#x201d; in <source>ImageNet: a large-scale hierarchical image database</source> (<publisher-name>IEEE</publisher-name>).</citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dindorf</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Bartaguiz</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Gassmann</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Fr&#xf6;hlich</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2022a</year>). <article-title>Conceptual structure and current trends in artificial intelligence, machine learning, and deep learning research in sports: a bibliometric review</article-title>. <source>Int. J. Environ. Res. Public Health</source> <volume>20</volume> (<issue>1</issue>), <fpage>173</fpage>. <pub-id pub-id-type="doi">10.3390/ijerph20010173</pub-id>
</citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dindorf</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Konradi</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wolf</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Taetz</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Bleser</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Huthwelker</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2021b</year>). <article-title>Classification and automated interpretation of spinal posture data using a pathology-independent classifier and explainable artificial intelligence (XAI)</article-title>. <source>Sensors</source> <volume>21</volume> (<issue>18</issue>), <fpage>6323</fpage>. <pub-id pub-id-type="doi">10.3390/s21186323</pub-id>
</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dindorf</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Konradi</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wolf</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Taetz</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Bleser</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Huthwelker</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2021c</year>). <article-title>General method for automated feature extraction and selection and its application for gender classification and biomechanical knowledge discovery of sex differences in spinal posture during stance and gait</article-title>. <source>Comput. Methods Biomech. Biomed. Engin</source> <volume>24</volume> (<issue>3</issue>), <fpage>299</fpage>&#x2013;<lpage>307</lpage>. <pub-id pub-id-type="doi">10.1080/10255842.2020.1828375</pub-id>
</citation>
</ref>
<ref id="B14">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Dindorf</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Konradi</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wolf</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Taetz</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Bleser</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Huthwelker</surname>
<given-names>J.</given-names>
</name>
<etal/>
</person-group> (<year>2022b</year>). &#x201c;<article-title>Visualization of interindividual differences in spinal dynamics in the presence of intraindividual variabilities</article-title>,&#x201d; in <source>Visualization of interindividual differences in spinal dynamics in the presence of intraindividual variabilities</source>. Editors <person-group person-group-type="editor">
<name>
<surname>Gillmann</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Schmidt</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>J&#xe4;nicke</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Wiegreffe</surname>
<given-names>D.</given-names>
</name>
</person-group> (<publisher-loc>Leipzig</publisher-loc>: <publisher-name>Leipzig University</publisher-name>).</citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dindorf</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Teufl</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Taetz</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Becker</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Bleser</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Fr&#xf6;hlich</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2021a</year>). <article-title>Feature extraction and gait classification in hip replacement patients on the basis of kinematic waveform data</article-title>. <source>Biomed. Hum. Kinet.</source> <volume>13</volume> (<issue>1</issue>), <fpage>177</fpage>&#x2013;<lpage>186</lpage>. <pub-id pub-id-type="doi">10.2478/bhk-2021-0022</pub-id>
</citation>
</ref>
<ref id="B16">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Elkholy</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Makihara</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Gomaa</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Rahman Ahad</surname>
<given-names>M. A.</given-names>
</name>
<name>
<surname>Yagi</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>Unsupervised GEI-based gait disorders detection from different views</article-title>,&#x201d; in <source>Unsupervised GEI-based gait disorders detection from different views</source> (<publisher-name>IEEE</publisher-name>).</citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ferreira</surname>
<given-names>M. I.</given-names>
</name>
<name>
<surname>Barbosa</surname>
<given-names>T. M.</given-names>
</name>
<name>
<surname>Costa</surname>
<given-names>M. J.</given-names>
</name>
<name>
<surname>Neiva</surname>
<given-names>H. P.</given-names>
</name>
<name>
<surname>Marinho</surname>
<given-names>D. A.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Energetics, biomechanics, and performance in masters&#x27; swimmers: a systematic review</article-title>. <source>J. Strength Cond. Res.</source> <volume>30</volume>, <fpage>2069</fpage>&#x2013;<lpage>2081</lpage>. <pub-id pub-id-type="doi">10.1519/jsc.0000000000001279</pub-id>
</citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Halilaj</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Rajagopal</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Fiterau</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Hicks</surname>
<given-names>J. L.</given-names>
</name>
<name>
<surname>Hastie</surname>
<given-names>T. J.</given-names>
</name>
<name>
<surname>Delp</surname>
<given-names>S. L.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Machine learning in human movement biomechanics: best practices, common pitfalls, and new opportunities</article-title>. <source>J. Biomech.</source> <volume>81</volume>, <fpage>1</fpage>&#x2013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1016/j.jbiomech.2018.09.009</pub-id>
</citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hernandez</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Kuli&#x107;</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Venture</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Adversarial autoencoder for visualization and classification of human activity: application to a low-cost commercial force plate</article-title>. <source>J. Biomech.</source> <volume>103</volume>, <fpage>109684</fpage>. <pub-id pub-id-type="doi">10.1016/j.jbiomech.2020.109684</pub-id>
</citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Horst</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Lapuschkin</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Samek</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>M&#xfc;ller</surname>
<given-names>K.-R.</given-names>
</name>
<name>
<surname>Sch&#xf6;llhorn</surname>
<given-names>W. I.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Explaining the unique nature of individual gait patterns with deep learning</article-title>. <source>Sci. Rep.</source> <volume>9</volume> (<issue>1</issue>), <fpage>2391</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-019-38748-8</pub-id>
</citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Horst</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Slijepcevic</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Simak</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Sch&#xf6;llhorn</surname>
<given-names>W. I.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Gutenberg Gait Database, a ground reaction force database of level overground walking in healthy individuals</article-title>. <source>Sci. Data</source> <volume>8</volume> (<issue>1</issue>), <fpage>232</fpage>. <pub-id pub-id-type="doi">10.1038/s41597-021-01014-6</pub-id>
</citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Huang</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Three-dimensional lumbar spine generation using variational autoencoder</article-title>. <source>Med. Eng. Phys.</source> <volume>120</volume>, <fpage>104046</fpage>. <pub-id pub-id-type="doi">10.1016/j.medengphy.2023.104046</pub-id>
</citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hunter</surname>
<given-names>J. D.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>Matplotlib: a 2D graphics environment</article-title>. <source>Comput. Sci. Eng.</source> <volume>9</volume> (<issue>3</issue>), <fpage>90</fpage>&#x2013;<lpage>95</lpage>. <pub-id pub-id-type="doi">10.1109/MCSE.2007.55</pub-id>
</citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hussain</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Anees</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Das</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Nguyen</surname>
<given-names>B. P.</given-names>
</name>
<name>
<surname>Marzuki</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Lin</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>High-content image generation for drug discovery using generative adversarial networks</article-title>. <source>Neural Netw.</source> <volume>132</volume>, <fpage>353</fpage>&#x2013;<lpage>363</lpage>. <pub-id pub-id-type="doi">10.1016/j.neunet.2020.09.007</pub-id>
</citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Huthwelker</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Konradi</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Wolf</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Westphal</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Schmidtmann</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Schubert</surname>
<given-names>P.</given-names>
</name>
<etal/>
</person-group> (<year>2023</year>). <article-title>Reference values and functional descriptions of transverse plane spinal dynamics during gait based on surface topography</article-title>. <source>Hum. Mov. Sci.</source> <volume>88</volume>, <fpage>103054</fpage>. <pub-id pub-id-type="doi">10.1016/j.humov.2022.103054</pub-id>
</citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Iglesias</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Talavera</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Gonz&#xe1;lez-Prieto</surname>
<given-names>&#xc1;.</given-names>
</name>
<name>
<surname>Mozo</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>G&#xf3;mez-Canaval</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Data Augmentation techniques in time series domain: a survey and taxonomy 2023</article-title>.</citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kang</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Perotte</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Tang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Ta</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Weng</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>UMLS-based data augmentation for natural language processing of clinical research literature</article-title>. <source>J. Am. Med. Inf. Assoc.</source> <volume>28</volume> (<issue>4</issue>), <fpage>812</fpage>&#x2013;<lpage>823</lpage>. <pub-id pub-id-type="doi">10.1093/jamia/ocaa309</pub-id>
</citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kiprijanovska</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Gjoreski</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Gams</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Detection of gait abnormalities for fall risk assessment using wrist-worn inertial sensors and deep learning</article-title>. <source>Sensors</source> <volume>20</volume> (<issue>18</issue>), <fpage>5373</fpage>. <pub-id pub-id-type="doi">10.3390/s20185373</pub-id>
</citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kneifl</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Rosin</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Avci</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>R&#xf6;hrle</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Fehr</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Low-dimensional data-based surrogate model of a continuum-mechanical musculoskeletal system based on non-intrusive model order reduction</article-title>. <source>Arch. Appl. Mech.</source> <volume>93</volume> (<issue>9</issue>), <fpage>3637</fpage>&#x2013;<lpage>3663</lpage>. <pub-id pub-id-type="doi">10.1007/s00419-023-02458-5</pub-id>
</citation>
</ref>
<ref id="B30">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Kornish</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Ezekiel</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Cornacchia</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>DCNN augmentation via synthetic data from variational autoencoders and generative adversarial networks</article-title>,&#x201d; in <source>DCNN augmentation via synthetic data from variational autoencoders and generative adversarial networks</source> (<publisher-name>IEEE</publisher-name>).</citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Landis</surname>
<given-names>J. R.</given-names>
</name>
<name>
<surname>Koch</surname>
<given-names>G. G.</given-names>
</name>
</person-group> (<year>1977</year>). <article-title>The measurement of observer agreement for categorical data</article-title>. <source>Biometrics</source> <volume>33</volume> (<issue>1</issue>), <fpage>159</fpage>. <pub-id pub-id-type="doi">10.2307/2529310</pub-id>
</citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lashgari</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Liang</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Maoz</surname>
<given-names>U.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Data augmentation for deep-learning-based electroencephalography</article-title>. <source>J. Neurosci. Methods</source> <volume>346</volume>, <fpage>108885</fpage>. <pub-id pub-id-type="doi">10.1016/j.jneumeth.2020.108885</pub-id>
</citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lau</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Tong</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Zhu</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Support vector machine for classification of walking conditions of persons after stroke with dropped foot</article-title>. <source>Hum. Mov. Sci.</source> <volume>28</volume> (<issue>4</issue>), <fpage>504</fpage>&#x2013;<lpage>514</lpage>. <pub-id pub-id-type="doi">10.1016/j.humov.2008.12.003</pub-id>
</citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Zheng</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Guo</surname>
<given-names>Q.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Wulamu</surname>
<given-names>A.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>Synthesizing foot and ankle kinematic characteristics for lateral collateral ligament injuries detection</article-title>. <source>IEEE Access</source> <volume>8</volume>, <fpage>188429</fpage>&#x2013;<lpage>188440</lpage>. <pub-id pub-id-type="doi">10.1109/access.2020.3029616</pub-id>
</citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ludwig</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Dindorf</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Kelm</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Simon</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Nimmrichter</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Fr&#xf6;hlich</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Reference values for sagittal clinical posture assessment in people aged 10 to 69 years</article-title>. <source>Int. J. Environ. Res. Public Health</source> <volume>20</volume> (<issue>5</issue>), <fpage>4131</fpage>. <pub-id pub-id-type="doi">10.3390/ijerph20054131</pub-id>
</citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Luo</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Tjahjadi</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Multi-set canonical correlation analysis for 3D abnormal gait behaviour recognition based on virtual sample generation</article-title>. <source>IEEE Access</source> <volume>8</volume>, <fpage>32485</fpage>&#x2013;<lpage>32501</lpage>. <pub-id pub-id-type="doi">10.1109/access.2020.2973898</pub-id>
</citation>
</ref>
<ref id="B37">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mahmud</surname>
<given-names>M. S.</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>J. Z.</given-names>
</name>
<name>
<surname>Fu</surname>
<given-names>X.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Variational autoencoder-based dimensionality reduction for high-dimensional small-sample data classification</article-title>. <source>Int. J. Comp. Intel. Appl.</source> <volume>19</volume> (<issue>01</issue>). <pub-id pub-id-type="doi">10.1142/S1469026820500029</pub-id>
</citation>
</ref>
<ref id="B38">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Marchi</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Vesperini</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Eyben</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Squartini</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Schuller</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2015</year>). &#x201c;<article-title>A novel approach for automatic acoustic novelty detection using a denoising autoencoder with bidirectional LSTM neural networks</article-title>,&#x201d; in <source>A novel approach for automatic acoustic novelty detection using a denoising autoencoder with bidirectional LSTM neural networks</source> (<publisher-name>IEEE</publisher-name>).</citation>
</ref>
<ref id="B39">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Martinez</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>De Leon</surname>
<given-names>P. L.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Falls risk classification of older adults using deep neural networks and transfer learning</article-title>. <source>IEEE J. Biomed. Health Inf.</source> <volume>24</volume> (<issue>1</issue>), <fpage>144</fpage>&#x2013;<lpage>150</lpage>. <pub-id pub-id-type="doi">10.1109/jbhi.2019.2906499</pub-id>
</citation>
</ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>McInnes</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Healy</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Melville</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>UMAP: Uniform Manifold approximation and projection for dimension reduction</article-title>.</citation>
</ref>
<ref id="B41">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mohammadian Rad</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>van Laarhoven</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Furlanello</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Marchiori</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Novelty detection using deep normative modeling for IMU-based abnormal movement monitoring in Parkinson&#x27;s disease and autism spectrum disorders</article-title>. <source>Sensors</source> <volume>18</volume> (<issue>10</issue>), <fpage>3533</fpage>. <pub-id pub-id-type="doi">10.3390/s18103533</pub-id>
</citation>
</ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mohan</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Huynh</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Sex differences in the spine</article-title>. <source>Curr. Phys. Med. Rehabil. Rep.</source> <volume>7</volume> (<issue>3</issue>), <fpage>246</fpage>&#x2013;<lpage>252</lpage>. <pub-id pub-id-type="doi">10.1007/s40141-019-00234-7</pub-id>
</citation>
</ref>
<ref id="B43">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Nguyen</surname>
<given-names>T.-N.</given-names>
</name>
<name>
<surname>Huynh</surname>
<given-names>H.-H.</given-names>
</name>
<name>
<surname>Meunier</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>Estimating skeleton-based gait abnormality index by sparse deep auto-encoder</article-title>,&#x201d; in <source>Estimating skeleton-based gait abnormality index by sparse deep auto-encoder</source> (<publisher-name>IEEE</publisher-name>).</citation>
</ref>
<ref id="B44">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Pandit</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Nahane</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Lade</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Rao</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>Abnormal gait detection by classifying inertial sensor data using transfer learning</article-title>,&#x201d; in <source>Abnormal gait detection by classifying inertial sensor data using transfer learning</source> (<publisher-name>IEEE</publisher-name>).</citation>
</ref>
<ref id="B45">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Paragliola</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Coronato</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>A deep learning-based approach for the classification of gait dynamics in subjects with a neurodegenerative disease</article-title>,&#x201d; in <source>A deep learning-based approach for the classification of gait dynamics in subjects with a neurodegenerative disease</source> (<publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>452</fpage>&#x2013;<lpage>468</lpage>.</citation>
</ref>
<ref id="B46">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pedregosa</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Varoquaux</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Gramfort</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Michel</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Thirion</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Grisel</surname>
<given-names>O.</given-names>
</name>
<etal/>
</person-group> (<year>2011</year>). <article-title>Scikit-learn: machine learning in Python</article-title>.</citation>
</ref>
<ref id="B47">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Phinyomark</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Petri</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Ib&#xe1;&#xf1;ez-Marcelo</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Osis</surname>
<given-names>S. T.</given-names>
</name>
<name>
<surname>Ferber</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Analysis of big data in gait biomechanics: current trends and future directions</article-title>. <source>J. Med. Biol. Eng.</source> <volume>38</volume> (<issue>2</issue>), <fpage>244</fpage>&#x2013;<lpage>260</lpage>. <pub-id pub-id-type="doi">10.1007/s40846-017-0297-2</pub-id>
</citation>
</ref>
<ref id="B48">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Prost</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Blondel</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Pomero</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Authier</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Boulay</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Jouve</surname>
<given-names>J. L.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). <article-title>Description of spine motion during gait in normal adolescents and young adults</article-title>. <source>Eur. Spine J.</source> <volume>30</volume> (<issue>9</issue>), <fpage>2520</fpage>&#x2013;<lpage>2530</lpage>. <pub-id pub-id-type="doi">10.1007/s00586-021-06918-w</pub-id>
</citation>
</ref>
<ref id="B49">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Saxena</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Cao</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Generative adversarial networks (GANs)</article-title>. <source>ACM Comput. Surv.</source> <volume>54</volume> (<issue>3</issue>), <fpage>1</fpage>&#x2013;<lpage>42</lpage>. <pub-id pub-id-type="doi">10.1145/3446374</pub-id>
</citation>
</ref>
<ref id="B50">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sharifi Renani</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Eustace</surname>
<given-names>A. M.</given-names>
</name>
<name>
<surname>Myers</surname>
<given-names>C. A.</given-names>
</name>
<name>
<surname>Clary</surname>
<given-names>C. W.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>The use of synthetic IMU signals in the training of deep learning models significantly improves the accuracy of joint kinematic predictions</article-title>. <source>Sensors</source> <volume>21</volume> (<issue>17</issue>), <fpage>5876</fpage>. <pub-id pub-id-type="doi">10.3390/s21175876</pub-id>
</citation>
</ref>
<ref id="B51">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Song</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Bai</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>A novel approach to abnormal gait recognition based on generative adversarial networks</article-title>,&#x201d; in <source>A novel approach to abnormal gait recognition based on generative adversarial networks</source> (<publisher-loc>Singapore</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>3</fpage>&#x2013;<lpage>15</lpage>.</citation>
</ref>
<ref id="B52">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Takeishi</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Kalousis</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>Variational autoencoder with differentiable physics engine for human gait analysis and synthesis</article-title>,&#x201d; in <conf-name>NeurIPS 2021 Workshop on Deep Generative Models and Downstream Applications</conf-name>.</citation>
</ref>
<ref id="B53">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Tu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Mak</surname>
<given-names>M.-W.</given-names>
</name>
<name>
<surname>Chien</surname>
<given-names>J.-T.</given-names>
</name>
</person-group> (<year>2020</year>). &#x201c;<article-title>Information maximized variational domain adversarial learning for speaker verification</article-title>,&#x201d; in <source>Information maximized variational domain adversarial learning for speaker verification</source> (<publisher-name>IEEE</publisher-name>).</citation>
</ref>
<ref id="B54">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tunca</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Salur</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Ersoy</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Deep learning for fall risk assessment with inertial sensors: utilizing domain knowledge in spatio-temporal gait parameters</article-title>. <source>IEEE J. Biomed. Health Inf.</source> <volume>24</volume> (<issue>7</issue>), <fpage>1994</fpage>&#x2013;<lpage>2005</lpage>. <pub-id pub-id-type="doi">10.1109/JBHI.2019.2958879</pub-id>
</citation>
</ref>
<ref id="B55">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Valamatos</surname>
<given-names>M. J.</given-names>
</name>
<name>
<surname>Abrantes</surname>
<given-names>J. M.</given-names>
</name>
<name>
<surname>Carnide</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Valamatos</surname>
<given-names>M.-J.</given-names>
</name>
<name>
<surname>Monteiro</surname>
<given-names>C. P.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Biomechanical performance factors in the track and field sprint start: a systematic review</article-title>. <source>Int. J. Environ. Res. Public Health</source> <volume>19</volume> (<issue>7</issue>), <fpage>4074</fpage>. <pub-id pub-id-type="doi">10.3390/ijerph19074074</pub-id>
</citation>
</ref>
<ref id="B56">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wahid</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Begg</surname>
<given-names>R. K.</given-names>
</name>
<name>
<surname>Hass</surname>
<given-names>C. J.</given-names>
</name>
<name>
<surname>Halgamuge</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Ackland</surname>
<given-names>D. C.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Classification of Parkinson&#x27;s disease gait using spatial-temporal gait features</article-title>. <source>IEEE J. Biomed. Health Inf.</source> <volume>19</volume> (<issue>6</issue>), <fpage>1794</fpage>&#x2013;<lpage>1802</lpage>. <pub-id pub-id-type="doi">10.1109/JBHI.2015.2450232</pub-id>
</citation>
</ref>
<ref id="B57">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Wan</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>He</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Variational autoencoder based synthetic data generation for imbalanced learning</article-title>,&#x201d; in <conf-name>2017 IEEE Symposium Series on Computational Intelligence (SSCI), Honolulu</conf-name>. <publisher-name>IEEE</publisher-name>, <fpage>1</fpage>&#x2013;<lpage>7</lpage>.</citation>
</ref>
<ref id="B58">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Yin</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Novel soft smart shoes for motion intent learning of lower limbs using LSTM with a convolutional autoencoder</article-title>. <source>IEEE Sensors J.</source> <volume>21</volume> (<issue>2</issue>), <fpage>1906</fpage>&#x2013;<lpage>1917</lpage>. <pub-id pub-id-type="doi">10.1109/jsen.2020.3019053</pub-id>
</citation>
</ref>
<ref id="B59">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Bozchalooi</surname>
<given-names>I. S.</given-names>
</name>
<name>
<surname>Darve</surname>
<given-names>E.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Memory-augmented generative adversarial networks for anomaly detection</article-title>. <source>IEEE Trans. Neural Netw. Learn Syst.</source> <volume>33</volume> (<issue>6</issue>), <fpage>2324</fpage>&#x2013;<lpage>2334</lpage>. <pub-id pub-id-type="doi">10.1109/TNNLS.2021.3132928</pub-id>
</citation>
</ref>
<ref id="B60">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Yee</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Low</surname>
<given-names>C. Y.</given-names>
</name>
<name>
<surname>Hashim</surname>
<given-names>N. M.</given-names>
</name>
<name>
<surname>Hanapiah</surname>
<given-names>F. A.</given-names>
</name>
<name>
<surname>Theng Koh</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Che Zakaria</surname>
<given-names>N. A.</given-names>
</name>
<etal/>
</person-group> (<year>2021</year>). &#x201c;<article-title>Systematic development of machine for abnormal muscle activity detection</article-title>,&#x201d; in <source>Systematic development of machine for abnormal muscle activity detection</source> (<publisher-name>IEEE</publisher-name>).</citation>
</ref>
<ref id="B61">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yukawa</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Kato</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Suda</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Yamagata</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Ueta</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Yoshida</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Normative data for parameters of sagittal spinal alignment in healthy subjects: an analysis of gender specific differences and changes with aging in 626 asymptomatic individuals</article-title>. <source>Eur. Spine J.</source> <volume>27</volume> (<issue>2</issue>), <fpage>426</fpage>&#x2013;<lpage>432</lpage>. <pub-id pub-id-type="doi">10.1007/s00586-016-4807-7</pub-id>
</citation>
</ref>
<ref id="B62">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zaroug</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Lai</surname>
<given-names>D. T. H.</given-names>
</name>
<name>
<surname>Mudie</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Begg</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Lower limb kinematics trajectory prediction using long short-term memory neural networks</article-title>. <source>Front. Bioeng. Biotechnol.</source> <volume>8</volume>, <fpage>362</fpage>. <pub-id pub-id-type="doi">10.3389/fbioe.2020.00362</pub-id>
</citation>
</ref>
<ref id="B63">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhao</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Mathieu</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Goroshin</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>LeCun</surname>
<given-names>Y.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Stacked what-where auto-encoders</article-title>.</citation>
</ref>
<ref id="B64">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhao</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Song</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Ermon</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Towards deeper understanding of variational autoencoding models</article-title>.</citation>
</ref>
<ref id="B65">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhao</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Eskenazi</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Learning discourse-level diversity for neural dialog models using conditional variational autoencoders</article-title>.</citation>
</ref>
<ref id="B66">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhao</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Song</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Ermon</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>InfoVAE: balancing learning and inference in variational autoencoders</article-title>. <source>AAAI</source> <volume>33</volume> (<issue>01</issue>), <fpage>5885</fpage>&#x2013;<lpage>5892</lpage>. <pub-id pub-id-type="doi">10.1609/aaai.v33i01.33015885</pub-id>
</citation>
</ref>
<ref id="B67">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhou</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Gordon</surname>
<given-names>M. L.</given-names>
</name>
<name>
<surname>Krishna</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Narcomey</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Fei-Fei</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Bernstein</surname>
<given-names>M. S.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>HYPE: a benchmark for human eYe perceptual evaluation of generative models</article-title>.</citation>
</ref>
</ref-list>
</back>
</article>