<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article article-type="review-article" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Digit. Health</journal-id><journal-title-group>
<journal-title>Frontiers in Digital Health</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Digit. Health</abbrev-journal-title></journal-title-group>
<issn pub-type="epub">2673-253X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fdgth.2026.1750111</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Review</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Using voice and speech data in healthcare: a scoping review of the ethical, legal and social implications</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author"><name><surname>Malo</surname><given-names>Marie-Fran&#x00E7;oise</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3217234/overview" />
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" 
vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
<contrib contrib-type="author"><name><surname>Bouhouita-Guermech</surname><given-names>Sarah</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1881303/overview" />
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
<contrib contrib-type="author"><name><surname>Gallois</surname><given-names>Hortense</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/2916026/overview"/><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
<contrib contrib-type="author"><name><surname>Ravitsky</surname><given-names>Vardit</given-names></name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/110265/overview"/><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="funding-acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding-acquisition</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
<contrib contrib-type="author">
<collab>Bridge2AI Voice Consortium</collab></contrib>
<contrib contrib-type="author" corresp="yes"><name><surname>B&#x00E9;lisle-Pipon</surname><given-names>Jean-Christophe</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="cor1">&#x002A;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1396876/overview" />
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="funding-acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding-acquisition</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Faculty of Health Sciences, Simon Fraser University</institution>, <city>Burnaby</city>, <state>BC</state>, <country country="CA">Canada</country></aff>
<aff id="aff2"><label>2</label><institution>&#x00C9;cole de Sant&#x00E9; Publique, Universit&#x00E9; de Montr&#x00E9;al</institution>, <city>Montr&#x00E9;al</city>, <state>QC</state>, <country country="CA">Canada</country></aff>
<aff id="aff3"><label>3</label><institution>Centre of Genomics and Policy (CGP), McGill University</institution>, <city>Montr&#x00E9;al</city>, <state>QC</state>, <country country="CA">Canada</country></aff>
<aff id="aff4"><label>4</label><institution>The Hastings Center for Bioethics</institution>, <city>Garrison</city>, <state>NY</state>, <country country="US">United States</country></aff>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label><bold>Correspondence:</bold> Jean-Christophe B&#x00E9;lisle-Pipon <email xlink:href="mailto:jean-christophe_belisle-pipon@sfu.ca">jean-christophe_belisle-pipon@sfu.ca</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-10"><day>10</day><month>02</month><year>2026</year></pub-date>
<pub-date publication-format="electronic" date-type="collection"><year>2026</year></pub-date>
<volume>8</volume><elocation-id>1750111</elocation-id>
<history>
<date date-type="received"><day>19</day><month>11</month><year>2025</year></date>
<date date-type="rev-recd"><day>09</day><month>01</month><year>2026</year></date>
<date date-type="accepted"><day>13</day><month>01</month><year>2026</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2026 Malo, Bouhouita-Guermech, Gallois, Ravitsky, Bridge2AI Voice Consortium and B&#x00E9;lisle-Pipon.</copyright-statement>
<copyright-year>2026</copyright-year><copyright-holder>Malo, Bouhouita-Guermech, Gallois, Ravitsky, Bridge2AI Voice Consortium and B&#x00E9;lisle-Pipon</copyright-holder><license><ali:license_ref start_date="2026-02-10">https://creativecommons.org/licenses/by/4.0/</ali:license_ref><license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p></license>
</permissions>
<abstract>
<p>Human voice and speech, integral to personal identity and social communication, are increasingly used as biometric and digital biomarkers in healthcare. Their collection and analysis, enabled by artificial intelligence, machine learning, and natural language processing, offer promising applications in disease detection and health monitoring. This scoping review examines the ethical, legal, and social implications (ELSIs) associated with using voice and speech data in healthcare. Following a structured search of four databases and a snowball method, 65 articles published between 2009 and 2024 were analyzed. The findings are organized into three main ELSI categories: ethical concerns include privacy breaches, challenges of informed consent, and the need for data validation and respect for vulnerable populations; social issues highlight biases, representational disparities, and risks of discrimination and data misuse; legal issues include unclear regulatory frameworks, conflicting jurisdictional mandates, and challenges in defining data ownership. The review reveals that while many ELSIs mirror those of other biomarker data, the unique properties of voice and speech require adapted frameworks for consent, data governance, and privacy protection. Technological limitations, dataset scarcity, and industry-academic divides exacerbate risks and hinder equitable development. Few studies deeply explore ELSIs in underrepresented populations, and there is a lack of robust empirical research. The review argues for a contextualist, not exceptionalist, approach to voice biomarkers, acknowledging both overlapping and unique challenges. It concludes by stressing the need for harmonized regulations, inclusive datasets, and interdisciplinary collaboration to ensure responsible, equitable integration of voice and speech technologies in healthcare.</p>
</abstract>
<kwd-group>
<kwd>artificial intelligence</kwd>
<kwd>biomarkers</kwd>
<kwd>ELSI</kwd>
<kwd>ethics</kwd>
<kwd>speech</kwd>
<kwd>voice biomarkers</kwd>
<kwd>voice</kwd>
</kwd-group><funding-group><funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by the National Institute for Health Grant &#x0023;1OT2OD03270-01.</funding-statement></funding-group><counts>
<fig-count count="5"/>
<table-count count="4"/><equation-count count="0"/><ref-count count="156"/><page-count count="18"/><word-count count="0"/></counts><custom-meta-group><custom-meta><meta-name>section-at-acceptance</meta-name><meta-value>Ethical Digital Health</meta-value></custom-meta></custom-meta-group>
</article-meta>
</front>
<body><sec id="s1" sec-type="background"><label>1</label><title>Background</title>
<p>The production of human voice and speech is an intricate process that involves the coordination of respiration, phonation, articulation, resonation and prosody (<xref ref-type="bibr" rid="B1">1</xref>, <xref ref-type="bibr" rid="B2">2</xref>) and depends on the collaboration of several motor and cognitive processes (<xref ref-type="bibr" rid="B2">2</xref>, <xref ref-type="bibr" rid="B3">3</xref>) and more than 100 muscles (<xref ref-type="bibr" rid="B4">4</xref>).<xref ref-type="fn" rid="n3"><sup>1</sup></xref> These processes are directly impacted by human physiological features. For example, the length and shape of the human vocal tract and the dynamic configuration of articulatory organs can directly affect sound and speech production (<xref ref-type="bibr" rid="B7">7</xref>). Voice and speech enable humans to convey their moods and assert their personality (<xref ref-type="bibr" rid="B8">8</xref>), communicate with their environment, express emotions, and detail thoughts (<xref ref-type="bibr" rid="B9">9</xref>&#x2013;<xref ref-type="bibr" rid="B11">11</xref>). Speech and voice, through both their physiological basis and social use, shape auditory identity, serving as an &#x201C;invisible business card&#x201D; presented to the world (<xref ref-type="bibr" rid="B12">12</xref>).</p>
<p>In recent years, these characteristics have led to the popularization of using vocal biometrics for tasks like voice authentication, speaker detection, and forensic speaker recognition (<xref ref-type="bibr" rid="B13">13</xref>). The uniqueness of voice and speech has even been compared to that of fingerprints (<xref ref-type="bibr" rid="B14">14</xref>), although some authors have contested this comparison (<xref ref-type="bibr" rid="B15">15</xref>). Recording voice and speech is quick, inexpensive, non-invasive, and can be done remotely (<xref ref-type="bibr" rid="B14">14</xref>&#x2013;<xref ref-type="bibr" rid="B17">17</xref>). Furthermore, recognizing people using their voice and being recorded are familiar processes, especially as microphones have become more ubiquitous in modern day life (<xref ref-type="bibr" rid="B18">18</xref>).</p>
<p>Aided by the developments in the field of voice biometrics and the successes of data-driven approaches for consumer speech applications, there has been a recent surge in interest in leveraging human voice and speech as sources of data for healthcare. Voice and speech biomarkers, derived from the nuances in voice and speech, represent a powerful yet underused resource in healthcare (<xref ref-type="bibr" rid="B19">19</xref>). Traditionally, biomarkers have been described as &#x201C;a defined characteristic that is measured as an indicator of normal biological processes, pathogenic processes, or biological responses to an exposure or intervention, including therapeutic interventions&#x201D; (<xref ref-type="bibr" rid="B20">20</xref>). Biomarkers can have different putative applications, ranging from prediction, diagnostic, or monitoring of a condition or a treatment, to the study of pharmacodynamic responses to medications. Voice biomarkers are a part of the new frontier known as digital biomarkers (<xref ref-type="bibr" rid="B21">21</xref>), where behavioural and physiological data is collected using digital devices to aid in diagnosis, monitoring, and treatment of various health conditions (<xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B23">23</xref>). Although the term digital biomarker has recently been under some scrutiny for being misleading, unclear, and ill-defined (<xref ref-type="bibr" rid="B24">24</xref>&#x2013;<xref ref-type="bibr" rid="B26">26</xref>), no other common term has emerged or gained consensus among scholars. Voice biomarkers are defined as distinct vocal attributes, alone or in combination, that have been scientifically validated as indicators of clinical outcomes (<xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B28">28</xref>). 
They can be used for symptom detection and monitoring, diagnosis of conditions, screening potential health issues, perceived health status monitoring, and to monitor disease progression (<xref ref-type="bibr" rid="B29">29</xref>). Similar to its use as biometric information, voice biomarkers have the advantage of being easy and relatively inexpensive to collect and preserve in databases (<xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B31">31</xref>).</p>
<p>Even though the idea of using voice as a biomarker of health has been explored for decades (<xref ref-type="bibr" rid="B32">32</xref>), recent developments in artificial intelligence (AI), machine learning (ML), and natural language processing (NLP) have enabled fast, precise analysis of voice data, making it possible to envision the scalable development of voice biomarkers analysis (<xref ref-type="bibr" rid="B33">33</xref>&#x2013;<xref ref-type="bibr" rid="B35">35</xref>). Voice biomarkers have already demonstrated their potential in the diagnosis of diseases and conditions such as Alzheimer&#x0027;s disease (<xref ref-type="bibr" rid="B36">36</xref>), Parkinson&#x0027;s disease (<xref ref-type="bibr" rid="B37">37</xref>), diabetes, developmental problems such as autism (<xref ref-type="bibr" rid="B38">38</xref>), and various mental health problems such as depression, stress, and suicidal ideation (<xref ref-type="bibr" rid="B39">39</xref>, <xref ref-type="bibr" rid="B40">40</xref>). However, there are a limited number of voice datasets meant for clinical use (<xref ref-type="bibr" rid="B41">41</xref>). These datasets are often incomplete, and due to this fact researchers often lack the capacity to consider implications of diversity and privacy control (<xref ref-type="bibr" rid="B42">42</xref>) or properly train algorithms with them (<xref ref-type="bibr" rid="B27">27</xref>). This has led to calls to curate large, scalable, standardized databases to use for research on voice and speech biomarkers (<xref ref-type="bibr" rid="B34">34</xref>, <xref ref-type="bibr" rid="B43">43</xref>, <xref ref-type="bibr" rid="B44">44</xref>).</p>
<p>While work has been done on the ethical, legal, and social implications (ELSI) of using voice biometrics (<xref ref-type="bibr" rid="B45">45</xref>&#x2013;<xref ref-type="bibr" rid="B48">48</xref>), the collection of voice and speech for clinical use requires a deeper understanding of the ELSI specific to this nascent use (<xref ref-type="bibr" rid="B44">44</xref>, <xref ref-type="bibr" rid="B49">49</xref>). This scoping review aims to present the state of the literature on the ELSI that arise from the collection and use of voice and speech data in healthcare, while also adding to the growing literature on the ELSI of digital biomarkers (<xref ref-type="bibr" rid="B50">50</xref>&#x2013;<xref ref-type="bibr" rid="B53">53</xref>). Moreover, it aims to identify how the collection of voice and speech data for medical and clinical use can redefine some of the ELSI related to digital biomarkers, how it fits with current understanding of the ELSI related to biomedical and clinical data, and to address the gaps in the existing literature and potential areas of concerns. The aim of this analysis is to contribute to the responsible development and use of voice and speech biomarkers.</p>
</sec>
<sec id="s2"><label>2</label><title>Method</title>
<p>A scoping review was conducted to appraise and determine the volume of literature and studies available on the ELSI of the use of voice as a biomarker of health (<xref ref-type="bibr" rid="B54">54</xref>, <xref ref-type="bibr" rid="B55">55</xref>). A scoping review approach was selected because it allows for a comprehensive mapping of the diverse studies and disciplines addressing the evolving challenges posed by voice data in healthcare. Rigor was ensured by engaging in a predefined and comprehensive search strategy (<xref ref-type="bibr" rid="B56">56</xref>, <xref ref-type="bibr" rid="B57">57</xref>). This scoping review was carried out in 3 phases. A first phase (Phase 1) consisted of finding articles through a search on databases, a second phase (Phase 2) was added where articles were found through a snowball search from Phase 1, and a third phase (Phase 3) was conducted to review new literature. Each phase, and its associated steps, are described below. The charting of the data and the collating, summarizing and reporting of results are presented in the Results section.</p>
<sec id="s2a"><label>2.1</label><title>Phase 1: initial database search</title>
<p>References from this review of the literature were identified through Ovid MEDLINE, Web of Science, Ovid EMBASE and IEEE. Ovid Medline and Ovid Embase were selected to ensure comprehensive coverage of healthcare-related literature, while IEEE Xplore and Web of Science were included to capture engineering perspectives and broader multidisciplinary research. Search terms were categorized into three main themes: <italic>Ethical, legal</italic> or <italic>social implications</italic>, <italic>voice</italic> or <italic>voice as a biomarker,</italic> and <italic>medical technologies.</italic> Established themes were populated using MeSH terms, Emtree terms, Boolean operators, and informed by key search literature. Discussions with experts within the Bridge2AI-Voice Consortium informed the selection of terms relevant to voice biomarkers and voice health, and a university librarian specialized in scoping reviews helped adapt the search strategy to the scope of the database. <xref ref-type="table" rid="T1">Table&#x00A0;1</xref> presents the terms that were used in the different databases.</p>
<table-wrap id="T1" position="float"><label>Table&#x00A0;1</label>
<caption><p>Search strategy.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Concepts</th>
<th valign="top" align="center">Terms</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">ELSI</td>
<td valign="top" align="left"><bold>MD</bold>&#x2009;<bold>&#x003D;</bold>&#x2009;((Ethic&#x002A; or Bioethic&#x002A; or Moral&#x002A; or ELSI or ELSA or ((social or bioethical or ethical) adj (issue&#x002A; or aspect&#x002A; or impact&#x002A; or consequence&#x002A; or implication&#x002A; or &#x201C;effect&#x201D; or &#x201C;effects&#x201D; or consideration&#x002A; or challenge&#x002A;))).mp. or informed consent/ or Bioethical Issues/ or Ethics/) not &#x201C;ethics committee&#x201D;.mp. not &#x201C;ethical approval&#x201D;.mp. not &#x201C;ethics and dissemination&#x201D;.mp.<break/><bold>EMB&#x2009;&#x003D;</bold>&#x2009;((Ethic&#x002A; or Bioethic&#x002A; or Moral&#x002A; or ELSI or ELSA or ((social or bioethical or ethical) adj (issue&#x002A; or aspect&#x002A; or impact&#x002A; or consequence&#x002A; or implication&#x002A; or &#x201C;effect&#x201D; or &#x201C;effects&#x201D; or consideration&#x002A; or challenge&#x002A;))).mp. or informed consent/ or Bioethical Issues/ or Ethics/) not &#x201C;ethics committee&#x201D;.mp. not &#x201C;ethical approval&#x201D;.mp. not &#x201C;ethics and dissemination&#x201D;.mp.<break/><bold>WoS&#x2009;&#x003D;</bold>&#x2009;Ethic&#x002A; OR Bioethic&#x002A; OR Moral&#x002A; OR &#x201C;ELSI&#x201D; OR &#x201C;Public NEAR/0 Policy&#x201D; OR &#x201C;informed NEAR/0 consent&#x201D; OR &#x201C;social NEAR/0 responsibilities&#x201D; OR &#x201C;Data NEAR/2 anonym&#x002A;&#x201D; OR &#x201C;medical NEAR/0 ethics&#x201D; OR ((social or bioethical or ethical) NEAR/3 (issue&#x002A; OR aspect&#x002A; OR impact&#x002A; or consequence&#x002A; OR implication&#x002A; OR effect&#x002A; OR consideration&#x002A; OR challenge&#x002A; OR policy OR policies)) (Topic)<break/><bold>IEEE&#x2009;&#x003D;</bold>&#x2009;Ethic&#x002A; OR Bioethic&#x002A; OR Moral&#x002A; OR ELSI OR ELSA OR &#x201C;social issue&#x201D; OR &#x201C;social aspect&#x201D; OR &#x201C;social impact&#x0022;</td>
</tr>
<tr>
<td valign="top" align="left">Voice</td>
<td valign="top" align="left"><bold>MD</bold>&#x2009;<bold>&#x003D;</bold>&#x2009;(Voice or Vocal or Acoustic or Speech or Bioacoustics or Respirat&#x002A; or Phonation or Resonation or Articulat&#x002A;).mp.<break/><bold>EMB&#x2009;&#x003D;</bold>&#x2009;(Voice or Vocal or Acoustic or Speech or Bioacoustics or Respirat&#x002A; or Phonation or Resonation or Articulat&#x002A;).mp.<break/><bold>WoS&#x2009;&#x003D;</bold>&#x2009;Voice OR Vocal OR Speech OR &#x201C;conversation&#x002A; NEAR/3 agent&#x002A;&#x201D; OR Acoustic OR Bioacoustics OR Respirat&#x002A; OR Phonation OR Resonation OR Articulat&#x002A;<break/><bold>IEEE&#x2009;&#x003D;</bold>&#x2009;Voice OR Vocal OR Speech OR Acoustic</td>
</tr>
<tr>
<td valign="top" align="left">Health Technology</td>
<td valign="top" align="left"><bold>MD</bold>&#x2009;<bold>&#x003D;</bold>&#x2009;[Technolog&#x002A; or Informatic&#x002A; or Biomarker&#x002A; or (&#x201C;health care&#x201D; adj1 technology)].mp.<break/><bold>EMB&#x2009;&#x003D;</bold>&#x2009;[Technolog&#x002A; or Informatic&#x002A; or Biomarker&#x002A; or (&#x201C;health care&#x201D; adj1 technology)].mp.<break/><bold>WoS&#x2009;&#x003D;</bold>&#x2009;Technolog&#x002A; OR Informatic&#x002A; OR Biomarker&#x002A; OR Diagnostic&#x002A; (Topic)<break/><bold>IEEE&#x2009;&#x003D;</bold>&#x2009;Technology OR Technologies OR Informatic OR &#x201C;machine learning&#x201D; OR &#x201C;algorithm&#x201D; OR &#x201C;natural language processing&#x201D; OR &#x201C;artificial intelligence&#x201D; OR biobank OR dataset</td>
</tr>
<tr>
<td valign="top" align="left">Legend</td>
<td valign="top" align="left">MD&#x2009;&#x003D;&#x2009;MedLine; EMB&#x2009;&#x003D;&#x2009;Embase; WoS&#x2009;&#x003D;&#x2009;Web of Science; IEEE&#x2009;&#x003D;&#x2009;Institute of Electrical and Electronics Engineers</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The initial database search was carried out on December 14, 2022, for all four databases, and 2,393 articles extracted were uploaded into Covidence (<xref ref-type="bibr" rid="B58">58</xref>). Three hundred eighty-four (384) duplicates were removed. For the first screening, the team outlined a list of inclusion criteria: articles were included if the appraisal of their title and abstract met the following criteria: (1) written in English or French; (2) defined as a peer-reviewed article, commentary, editorial, review, or discussion paper; and (3) included topics that intersected the three main themes from the literature review. No restrictions were placed on the date of publication or study design. Three screeners (MFM, SBG, QH) independently applied the inclusion and exclusion criteria on the 2,009 remaining papers (<xref ref-type="bibr" rid="B59">59</xref>). In instances of disagreement, consensus was achieved through discussion between the three reviewers, with advice from someone with experience in literature reviews (JCBP).</p>
<p>The title and abstract screening process removed 1,899 papers from the scoping review at hand. Full-text review was then done to assess the eligibility of the 110 remaining papers. The following exclusion criteria were used to sort and evaluate studies: (1) were not in English or French; (2) did not address ELSI-related questions; (3) did not relate to voice or speech; (4) did not relate to voice data collection or voice data usage in technology; (5) were not healthcare related; (6) were book sections or chapters; (7) had no full text available. The same screeners (MFM, SBG, QH) independently applied the exclusion criteria. Again, disagreements were discussed between reviewers until consensus was achieved. In the end, 24 studies were selected, with advice from the principal investigator (JCBP).</p>
</sec>
<sec id="s2b"><label>2.2</label><title>Phase 2: snowball search</title>
<p>Considering the small number of articles included in the scoping review and the key aim of a scoping review to assess the extent of discussion around a topic, the research team (MFM, SBG, JCBP), following the recommendations of a university librarian specialized in scoping reviews, carried out a second round of snowball searching from the 24 included papers. Two members of the team (MFM, SBG) retrieved all the papers cited in the 24 papers, as well as all the articles citing these included papers. This work added 1,650 articles to the original search. A second round of title, abstract, and full-text screening was carried out on these articles, following the same criteria identified above. At the end of this process, 24 additional papers were included, increasing the total number of studies in the scoping review to 48.</p>
</sec>
<sec id="s2c"><label>2.3</label><title>Coding of the results from phase 1 and phase 2</title>
<p>Two articles were coded using NVivo 14 (<xref ref-type="bibr" rid="B60">60</xref>) by members of the team (MFM, SBG) to create the initial coding frame (<xref ref-type="bibr" rid="B61">61</xref>). The coding frame was developed around 5 overarching themes: (1) voice as a biomarker; (2) ethical implications; (3) social implications; (4) legal implications; (5) mitigating solutions. The coding frame was reviewed by an experienced coder (JCBP), who provided comments and guidance. During the coding process, members of the team (MFM, SBG) reviewed two papers with the initial coding frame to assess the degree of intra-coder agreement. Two coders (MFM, SBG) reviewed the remaining data individually. New codes were inductively added to the initial coding frame based on the content of the articles. Weekly meetings between the two coders were organized to ensure agreement between coders. After coding, the two coding files were merged to create a final coding grid. Each article was reviewed with the final coding grid to ensure a consistent coding.</p>
</sec>
<sec id="s2d"><label>2.4</label><title>Phase 3: subsequent database search and analysis of results</title>
<p>A second search of the previously identified databases with the same terms was carried out in April 2024 to review articles published since the initial search in December 2022. Following this, the articles found were uploaded to Covidence and two team members (MFM, AG) used the same inclusion criteria to sort through the title and abstract and through the full text. In total, 413 articles were imported for title and abstract screening, 43 full-text articles were reviewed, and 17 articles were included, bringing the total number of articles in the review to 65. <xref ref-type="fig" rid="F1">Figure&#x00A0;1</xref> presents our review flowchart following PRISMA&#x0027;s guidelines (<xref ref-type="bibr" rid="B62">62</xref>) for all three stages. Papers were thematically analyzed using NVivo 14 (<xref ref-type="bibr" rid="B60">60</xref>). The initial coding grid used was the final coding grid from stage 2.3. During the coding process, members of the team (MFM, AG) reviewed two papers with the coding grid to assess the degree of intra-coder agreement. Research team members (MFM, AG) reviewed the remaining data individually. New codes were inductively added by MFM and AG to the coding grid based on the content of the articles. Weekly meetings between the two coding team members were organized to ensure agreement between coders. After coding, the two coding files were merged to create a final coding grid. Each article (from the first and subsequent database search) was reviewed with the final coding grid to ensure a consistent coding.</p>
<fig id="F1" position="float"><label>Figure&#x00A0;1</label>
<caption><p>Prisma flowchart of search outcomes.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdgth-08-1750111-g001.tif"><alt-text content-type="machine-generated">Flowchart illustrating a research study selection process across three stages: Initial Screening, Subsequent Database Screening, and Snowballing. The process begins with literature search across various databases, followed by filtering out duplicates and screening for eligibility. The final selection results in the inclusion of 65 studies. Each stage outlines reasons for exclusion, such as irrelevance and lack of data.</alt-text>
</graphic>
</fig>
</sec>
</sec>
<sec id="s3" sec-type="results"><label>3</label><title>Results</title>
<p>Results are presented following the different themes of the coding. Each of the three types of implications (ethical, social, and legal) is examined in two ways. The first category, &#x201C;conventional implications&#x201D;, covers issues arising from the use of voice and speech data in healthcare that parallel those linked to other biometric data, such as gait or ocular movement analysis (<xref ref-type="bibr" rid="B63">63</xref>, <xref ref-type="bibr" rid="B64">64</xref>), and defines where voice and speech are treated as continuous with existing health data ethics discussions. The second category, &#x201C;modality-specific implications&#x201D;, addresses ELSI that require refinement or revision due to the unique characteristics of voice and speech data, and defines where voice and speech data challenge conventional frames.</p>
<p>As noted, not all of the included articles were specifically related to voice biomarker research or the use of voice biomarkers in healthcare. Although they addressed voice biomarker research and use, some articles dealt with digital biomarker research more generally, the use of smartphones or sensors in healthcare, or new trends in the treatment of a specific disease. Contrasting these implications with those brought on specifically by voice and speech data makes it possible to recognize the distinct risks and responsibilities associated with voice, while maintaining coherence across wider conversations on health data governance. In each of those categories, the implications are presented in order of how frequently they were mentioned by authors.</p>
<sec id="s3a"><label>3.1</label><title>Overview of results</title>
<p>The 65 included articles were published between 2009 and 2024, with most of the articles published between 2019 and 2024 from teams in different parts of the world. <xref ref-type="fig" rid="F2">Figure 2</xref> presents a timeline of the number of included publications per year. <xref ref-type="fig" rid="F3">Figure&#x00A0;3</xref> presents a world map of the different countries of either the first author or the research teams. About half of the articles (<italic>n</italic>&#x2009;&#x003D;&#x2009;34) were reviews (e.g., scoping review, systematic review, contemporary review). <xref ref-type="fig" rid="F4">Figure&#x00A0;4</xref> presents a pie chart of the different types of articles included in this review. They touched on a variety of diseases, ranging from mental health conditions and behavioral health to Alzheimer&#x0027;s disease, Parkinson disease, heart failure, Covid-19, and speech disorders. <xref ref-type="fig" rid="F5">Figure&#x00A0;5</xref> presents the field area of included articles. All selected publications included elements related to the use or interpretation of voice and speech data in healthcare or research settings. As mentioned previously, to be included, publications were required to meaningfully engage with the topics of voice and speech biomarkers, whether in a dedicated section or as part of a wider discussion. In practice, this engagement varied across the literature: in some publications, voice biomarkers constituted the main focus of the research, with detailed conceptual, technical, or clinical discussions; in others, they appeared more tangentially, often mentioned as emerging tools within larger explorations of artificial intelligence, digital phenotyping, or data-driven health approaches.</p>
<fig id="F2" position="float"><label>Figure&#x00A0;2</label>
<caption><p>Number of included articles published per year (2008-2024).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdgth-08-1750111-g002.tif"><alt-text content-type="machine-generated">Line graph showing a data trend from 2006 to 2024. Values remain low and stable until 2017, then sharply increase to peak twice, in 2020 and 2023, before declining.</alt-text>
</graphic>
</fig>
<fig id="F3" position="float"><label>Figure&#x00A0;3</label>
<caption><p>Geographic distribution of included articles based on the country of the first author or research team. This map illustrates the global distribution of the 67 articles included in the review. The majority originated from the United States of America (<italic>n</italic>&#x2009;&#x003D;&#x2009;27), followed by Germany (<italic>n</italic>&#x2009;&#x003D;&#x2009;8), Canada and the United Kingdom (<italic>n</italic>&#x2009;&#x003D;&#x2009;5 each), Switzerland and Australia (<italic>n</italic>&#x2009;&#x003D;&#x2009;3 each), and several other countries contributing one or two publications each (Italy&#x2009;&#x003D;&#x2009;2, Greece&#x2009;&#x003D;&#x2009;1, India&#x2009;&#x003D;&#x2009;2, Taiwan&#x2009;&#x003D;&#x2009;1, Colombia&#x2009;&#x003D;&#x2009;1, Netherlands&#x2009;&#x003D;&#x2009;2, Ireland&#x2009;&#x003D;&#x2009;2, France&#x2009;&#x003D;&#x2009;2, Japan&#x2009;&#x003D;&#x2009;1, Israel&#x2009;&#x003D;&#x2009;1, Luxembourg&#x2009;&#x003D;&#x2009;1). Countries are shaded according to the number of publications, with darker tones indicating a higher number of articles. World map created using MapChart (<ext-link ext-link-type="uri" xlink:href="https://www.mapchart.net/">https://www.mapchart.net/</ext-link>), licensed under <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by-sa/4.0/">CC BY-SA 4.0</ext-link>.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdgth-08-1750111-g003.tif"><alt-text content-type="machine-generated">World map displaying countries shaded in different purples based on the number of articles. Darkest purple indicates 11+ articles in the USA, Canada, and several European countries. Intermediate shades represent fewer articles in regions like India, Australia, and Japan. Countries with zero articles are not shaded.</alt-text>
</graphic>
</fig>
<fig id="F4" position="float"><label>Figure&#x00A0;4</label>
<caption><p>Publication types of included articles. The figure displays the distribution of article types, ordered by frequency: reviews (<italic>n</italic>&#x2009;&#x003D;&#x2009;34), literature reviews (<italic>n</italic>&#x2009;&#x003D;&#x2009;8), perspectives (<italic>n</italic>&#x2009;&#x003D;&#x2009;5), qualitative research articles (<italic>n</italic>&#x2009;&#x003D;&#x2009;4), surveys (<italic>n</italic>&#x2009;&#x003D;&#x2009;3), whitepapers (<italic>n</italic>&#x2009;&#x003D;&#x2009;3), and single-publication categories including commentary, longitudinal waitlist-control field study, opinion, position paper, quantitative study, text and opinion, viewpoint, and workshop summary (<italic>n</italic>&#x2009;&#x003D;&#x2009;1).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdgth-08-1750111-g004.tif"><alt-text content-type="machine-generated">Pie chart depicting the distribution of document types. Review holds the largest portion at 52.3%, followed by literature review at 12.3%, perspective at 7.7%, qualitative research at 6.2%, survey and whitepaper each at 4.6%, and smaller segments for commentary, opinion, text and opinion, and workshop summary, each at 1.5%.</alt-text>
</graphic>
</fig>
<fig id="F5" position="float"><label>Figure&#x00A0;5</label>
<caption><p>Field of health research of included studies. The figure presents the distribution of health conditions examined across the included papers, ordered by frequency: mental health and mood disorders (<italic>n</italic>&#x2009;&#x003D;&#x2009;30), speech and communication disorders (<italic>n</italic>&#x2009;&#x003D;&#x2009;8), Alzheimer&#x0027;s disease and cognitive decline (<italic>n</italic>&#x2009;&#x003D;&#x2009;8), general healthcare applications (<italic>n</italic>&#x2009;&#x003D;&#x2009;6), Parkinson&#x0027;s disease (<italic>n</italic>&#x2009;&#x003D;&#x2009;5), neuropsychiatric disorders (<italic>n</italic>&#x2009;&#x003D;&#x2009;3), and single-paper categories including COVID-19, heart failure, and Huntington&#x0027;s disease (<italic>n</italic>&#x2009;&#x003D;&#x2009;1).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdgth-08-1750111-g005.tif"><alt-text content-type="machine-generated">Pie chart depicting various healthcare categories. Mental health and mood disorders constitute 47.6%, Alzheimer's disease and cognitive decline 12.7%, speech and communication disorders 12.7%, general healthcare 9.5%, Parkinson&#x2019;s disease 7.9%, and neuropsychiatric disorders 4.8%.</alt-text>
</graphic>
</fig>
</sec>
<sec id="s3b"><label>3.2</label><title>Ethical implications</title>
<sec id="s3b1"><label>3.2.1</label><title>Conventional implications</title>
<sec id="s3b1a"><label>3.2.1.1</label><title>Privacy and security</title>
<p>A number of publications report privacy issues arising from continuous, passive collection of voice and speech data (<xref ref-type="bibr" rid="B65">65</xref>&#x2013;<xref ref-type="bibr" rid="B68">68</xref>). Privacy is a major concern for any remote monitoring approach (<xref ref-type="bibr" rid="B69">69</xref>), as the guarantees on privacy protections that can be provided are limited (<xref ref-type="bibr" rid="B70">70</xref>) and because privacy has no formal legal definitions (<xref ref-type="bibr" rid="B71">71</xref>). However, privacy regulations have been described as stifling to technological innovations (<xref ref-type="bibr" rid="B65">65</xref>, <xref ref-type="bibr" rid="B72">72</xref>).</p>
<p>Articles highlight two main types of considerations. First, continuous data sharing is required to capture the amount of data accumulated through continuous and passive data-collection methods, which increases the risk of privacy breaches during this process (<xref ref-type="bibr" rid="B67">67</xref>, <xref ref-type="bibr" rid="B73">73</xref>&#x2013;<xref ref-type="bibr" rid="B75">75</xref>). Storing large amounts of data can also increase the risk of a privacy breach or data leakage, and also the likelihood of data being sold in the secondary data market (<xref ref-type="bibr" rid="B69">69</xref>, <xref ref-type="bibr" rid="B70">70</xref>, <xref ref-type="bibr" rid="B72">72</xref>, <xref ref-type="bibr" rid="B74">74</xref>, <xref ref-type="bibr" rid="B76">76</xref>&#x2013;<xref ref-type="bibr" rid="B79">79</xref>). Second, continuous recording can also quickly become perceived as surveillance (<xref ref-type="bibr" rid="B80">80</xref>). One participant in a study from Hurley et al. (<xref ref-type="bibr" rid="B76">76</xref>) mentioned that constant data collection on any type of data is &#x201C;overkill&#x201D;. In the same study, another participant described it as a form of &#x201C;personal intrusiveness&#x201D;. This can apply to the patients or the subject being recorded, but also to their caregivers, who may also have their own concerns over monitoring (<xref ref-type="bibr" rid="B81">81</xref>). 
To mitigate this concern, Popp et al. (<xref ref-type="bibr" rid="B82">82</xref>), in a review on the shift from active to passive monitoring of Alzheimer&#x0027;s disease, mention that mandating that a person interact with a device can also represent a conscious effort on people&#x0027;s part to remind them that they are being recorded and lessen the impression of passive surveillance and loss of control, especially for people with chronic or degenerative conditions that affect their own sense of control (<xref ref-type="bibr" rid="B72">72</xref>). Similarly, Bailey, Patel &#x0026; Guari (<xref ref-type="bibr" rid="B83">83</xref>), while focusing on data collection involving minors, raise a broader concern: the trade-off between gathering more data to train models effectively and maintaining privacy protections. This tension is relevant across all populations, particularly when sensitive data like voice is involved.</p>
<p>Voice and speech data is also potentially identifiable and can be used for multiple nefarious undertakings. For example, this data can be used to surveil individuals and intrude on their private lives (<xref ref-type="bibr" rid="B73">73</xref>, <xref ref-type="bibr" rid="B76">76</xref>, <xref ref-type="bibr" rid="B80">80</xref>), especially if voice data provides a window into an individual&#x0027;s thoughts (<xref ref-type="bibr" rid="B76">76</xref>). That private information could then be used by advertising companies to target individuals in a compromised emotional state (<xref ref-type="bibr" rid="B68">68</xref>, <xref ref-type="bibr" rid="B72">72</xref>, <xref ref-type="bibr" rid="B79">79</xref>, <xref ref-type="bibr" rid="B84">84</xref>). One article also mentions the potential for mass surveillance of whole sub-groups of the population (<xref ref-type="bibr" rid="B85">85</xref>).</p>
<p>Research on voice and speech data also sometimes requires and benefits from the sharing of data in publicly available repositories (<xref ref-type="bibr" rid="B70">70</xref>, <xref ref-type="bibr" rid="B86">86</xref>) or with other research groups (<xref ref-type="bibr" rid="B87">87</xref>). Ensuring the security of the data when sharing with other research groups is a specific concern raised by one article (<xref ref-type="bibr" rid="B81">81</xref>). With respect to privacy, participants have different expectations regarding data access and data sharing (<xref ref-type="bibr" rid="B81">81</xref>, <xref ref-type="bibr" rid="B88">88</xref>, <xref ref-type="bibr" rid="B89">89</xref>), and even if authors mention that there should be a clear understanding in consent forms regarding the types of data that will be shared, to whom, how, and why (<xref ref-type="bibr" rid="B70">70</xref>, <xref ref-type="bibr" rid="B90">90</xref>), there seems to be a lack of understanding of what people want regarding their privacy in voice processing (<xref ref-type="bibr" rid="B91">91</xref>). These concerns also apply to data collection done for research, as well as data collected for clinical use. However, privacy protection can be harder to sustain in smaller research projects, because the identity of the subjects is often known (<xref ref-type="bibr" rid="B92">92</xref>). Sharing voice data with a new group of researchers can also lead to re-identification of data subjects via other means, for example through cross-identification with other datasets (<xref ref-type="bibr" rid="B75">75</xref>).</p>
</sec>
<sec id="s3b1b"><label>3.2.1.2</label><title>Informed consent</title>
<p>A study by Hurley et al. on multimodal computer perception and neurotechnology mentions that, for data collections on new types of data like voice, it can be hard, at the time of consent, to explain to people and patients how they can be harmed if their data is leaked to unwanted third parties and how that can lead to long-term risks that are difficult to anticipate (<xref ref-type="bibr" rid="B76">76</xref>). This can affect the transparency towards participants, who must understand what data is collected, when and where (<xref ref-type="bibr" rid="B70">70</xref>, <xref ref-type="bibr" rid="B75">75</xref>). Additionally, participants&#x0027; level of literacy (general and technical) can affect how well they grasp the content of consent forms (<xref ref-type="bibr" rid="B66">66</xref>, <xref ref-type="bibr" rid="B90">90</xref>). Information on data collection, storage, and usage should thus be written in language that is appropriate and adapted to the group of participants (<xref ref-type="bibr" rid="B70">70</xref>, <xref ref-type="bibr" rid="B75">75</xref>, <xref ref-type="bibr" rid="B90">90</xref>).</p>
</sec>
<sec id="s3b1c"><label>3.2.1.3</label><title>Accessibility of data</title>
<p>Some articles mention that voice data sharing between research groups can be difficult on a practical level. For example, voice data (or their derivatives) can be stored in a format that is not convenient or transmittable, or else different research projects can involve specific tasks, recording conditions, diagnostic criteria, or populations, that can limit the utility of combining different datasets (<xref ref-type="bibr" rid="B65">65</xref>, <xref ref-type="bibr" rid="B70">70</xref>, <xref ref-type="bibr" rid="B87">87</xref>). Some conditions are also rarer, which can make recruitment difficult and time-consuming for researchers (<xref ref-type="bibr" rid="B65">65</xref>). Some datasets also lack sensitive characterizing information about participants, like some demographic information or comorbidities, making them difficult to use for research groups (<xref ref-type="bibr" rid="B93">93</xref>).</p>
</sec>
</sec>
<sec id="s3b2"><label>3.2.2</label><title>Modality-specific implications</title>
<sec id="s3b2a"><label>3.2.2.1</label><title>Privacy and security</title>
<p>Patient privacy and confidentiality are major concerns for any type of clinical data collection or clinical monitoring approach, but the engineering and computing behind privacy-preserving techniques for voice and speech data present unique difficulties. Encryption, anonymization, and de-identification, typical safeguards that assure the privacy of people, have proven to be more challenging with voice and speech data than with other biometric information (like facial or iris recognition) (<xref ref-type="bibr" rid="B73">73</xref>, <xref ref-type="bibr" rid="B81">81</xref>, <xref ref-type="bibr" rid="B88">88</xref>, <xref ref-type="bibr" rid="B89">89</xref>, <xref ref-type="bibr" rid="B94">94</xref>&#x2013;<xref ref-type="bibr" rid="B96">96</xref>). This is because techniques cannot readily be transferred (the variability in speech signals means that speakers don&#x0027;t utilize a template or print in the same way as other biometric data) (<xref ref-type="bibr" rid="B74">74</xref>), or are otherwise limited in their effectiveness (<xref ref-type="bibr" rid="B97">97</xref>, <xref ref-type="bibr" rid="B98">98</xref>). Those considerations also extend to third-party individuals if their voice and speech are also being recorded by sensors (<xref ref-type="bibr" rid="B73">73</xref>). And while specific privacy-preserving techniques and methodologies are mentioned (for example, automatic feature extraction, subsampling (<xref ref-type="bibr" rid="B75">75</xref>), federated learning (<xref ref-type="bibr" rid="B65">65</xref>), encryption of voice and speech data (<xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B75">75</xref>), and privacy-by-design (<xref ref-type="bibr" rid="B71">71</xref>)), those can quickly become obsolete as new technologies emerge to intrude and access data in an unauthorized manner.</p>
<p>There is also evidence that information about a speaker, especially sociodemographic attributes, can be gathered from a voice recording alone (<xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B74">74</xref>, <xref ref-type="bibr" rid="B91">91</xref>, <xref ref-type="bibr" rid="B98">98</xref>). This can include (but is not limited to) body measurements, age, gender, sleepiness, level of intoxication, native language, and socioeconomic status (<xref ref-type="bibr" rid="B91">91</xref>), as well as sexual preferences (<xref ref-type="bibr" rid="B71">71</xref>), some of which correspond to personally identifiable information (PII) (<xref ref-type="bibr" rid="B98">98</xref>). But, up to now, the discourse surrounding privacy in voice and speech data has paid little attention to voice-inferred information (<xref ref-type="bibr" rid="B91">91</xref>) and how its understanding can reinforce concerns surrounding data privacy and second-hand analysis of data (<xref ref-type="bibr" rid="B67">67</xref>). However, it is important to mention that the collection of voice and speech data can mean a lot of different things. As such, different types of voice and speech data (speech, voice, breathing, laughing, coughing, etc.) present different risks (<xref ref-type="bibr" rid="B76">76</xref>, <xref ref-type="bibr" rid="B98">98</xref>). For example, breathing sounds generally contain less sensitive information than speech and audio recordings of conversations (<xref ref-type="bibr" rid="B99">99</xref>).</p>
<p>Finally, one article mentions the risk of the recording of unflattering or nefarious content (for example recording of people in the bathroom, but also more damning content like recordings of threats or other illegal activities), specifically through continuous voice recording (<xref ref-type="bibr" rid="B100">100</xref>). This could entail the possibility that research groups must break confidentiality for legal reasons (<xref ref-type="bibr" rid="B100">100</xref>). Another article also mentions that the privacy of conversational home devices, which can be used to capture voice and speech data, is not guaranteed, as it has already been shown that private conversations have been sent to unintended recipients (<xref ref-type="bibr" rid="B83">83</xref>).</p>
</sec>
<sec id="s3b2b"><label>3.2.2.2</label><title>Informed consent</title>
<p>Ensuring informed consent prior to the data collection of voice and speech is essential. As mentioned in several articles, one of the major advantages of voice and speech biomarkers is the ability to analyze data continuously instead of only during the occasional, punctual encounters with their healthcare professionals (<xref ref-type="bibr" rid="B84">84</xref>, <xref ref-type="bibr" rid="B97">97</xref>, <xref ref-type="bibr" rid="B98">98</xref>, <xref ref-type="bibr" rid="B101">101</xref>, <xref ref-type="bibr" rid="B102">102</xref>). But continuous recording, in addition to coming with issues of risk to individual privacy, may also imply the recording of people who are not aware that they are being recorded and, perhaps most importantly, have not consented to that recording (<xref ref-type="bibr" rid="B68">68</xref>, <xref ref-type="bibr" rid="B73">73</xref>, <xref ref-type="bibr" rid="B76">76</xref>, <xref ref-type="bibr" rid="B79">79</xref>, <xref ref-type="bibr" rid="B88">88</xref>, <xref ref-type="bibr" rid="B94">94</xref>, <xref ref-type="bibr" rid="B95">95</xref>, <xref ref-type="bibr" rid="B100">100</xref>). This can include people in the background who are just passing within range of the recording device or people having a conversation with patients (<xref ref-type="bibr" rid="B79">79</xref>). Gathering consent from those individuals is, however, highly unfeasible. Not only can it place patients in uncomfortable situations where they might have to disclose their conditions to people they would not normally divulge it to (<xref ref-type="bibr" rid="B76">76</xref>, <xref ref-type="bibr" rid="B92">92</xref>), but it is also far too demanding.</p>
<p>Some articles also highlight the fact that it may be difficult to consent to continuous recording because people cannot know in advance the events that they will encounter and will want to keep private (<xref ref-type="bibr" rid="B75">75</xref>, <xref ref-type="bibr" rid="B76">76</xref>). Also, the large amount of voice and speech data gathered increases the privacy risks for people (<xref ref-type="bibr" rid="B70">70</xref>). And, since different types of data can be collected with a voice recording (e.g., breathing, free speech), voice and speech data can present somewhat of a unique challenge compared to other types of data collection (<xref ref-type="bibr" rid="B100">100</xref>).</p>
<p>Finally, as with other types of data, there is a risk of uncovering incidental findings related to a person&#x0027;s health (<xref ref-type="bibr" rid="B70">70</xref>). This could happen, for example, if research is conducted on a sample from an asymptomatic person (<xref ref-type="bibr" rid="B75">75</xref>). However, even if the norm is to allow people to decide, at the time of consent, if they would like to be informed of incidental findings (<xref ref-type="bibr" rid="B70">70</xref>), and to share the diagnosis, especially if early treatment is available and beneficial (<xref ref-type="bibr" rid="B75">75</xref>, <xref ref-type="bibr" rid="B94">94</xref>, <xref ref-type="bibr" rid="B103">103</xref>, <xref ref-type="bibr" rid="B104">104</xref>), sharing results from voice analysis can be risky, as the clinical utility of voice biomarker results is uncertain (<xref ref-type="bibr" rid="B75">75</xref>).</p>
</sec>
<sec id="s3b2c"><label>3.2.2.3</label><title>Data</title>
<sec id="s3b2c1"><label>3.2.2.3.1</label><title>Quality of Data</title>
<p>The quality of the speech and voice data used to train algorithms and analyze the voice and speech of patients is a key ethical concern in this field. The type of device used, the environment, and other people in the recordings are all different real-world issues that can hinder the effectiveness of the analysis of voice and speech biomarkers (<xref ref-type="bibr" rid="B69">69</xref>, <xref ref-type="bibr" rid="B105">105</xref>&#x2013;<xref ref-type="bibr" rid="B108">108</xref>). One paper also mentions that the use of mobile devices in research studies is practical (mimics natural behaviors, less expensive than providing specific study devices), but that it can lead to noise from differences in hardware and software that may need to be corrected before the data can be analyzed and may hinder sharing across research groups (<xref ref-type="bibr" rid="B69">69</xref>). Additionally, there are no standardized data formats and recording protocols for voice and speech recordings in healthcare, making it difficult to develop large databases from different sources (<xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B65">65</xref>). This poses a problem given the need for a sufficiently large quantity of data to be able to train AI and ML algorithms (<xref ref-type="bibr" rid="B75">75</xref>).</p>
<p>Finally, because voice and speech can be affected by energy levels, the timing of voice data collection is crucial. Information about the patients&#x0027; energy levels should thus be noted (<xref ref-type="bibr" rid="B81">81</xref>).</p>
</sec>
<sec id="s3b2c2"><label>3.2.2.3.2</label><title>Quantity of Data</title>
<p>Several articles mention that the limited number of substantial, clinically relevant voice datasets is an issue to train algorithms (<xref ref-type="bibr" rid="B65">65</xref>, <xref ref-type="bibr" rid="B75">75</xref>, <xref ref-type="bibr" rid="B107">107</xref>&#x2013;<xref ref-type="bibr" rid="B110">110</xref>). However, collecting more data is not necessarily a viable solution, as storage of a large quantity of data can be expensive and require substantial equipment for individual researchers or teams (<xref ref-type="bibr" rid="B82">82</xref>, <xref ref-type="bibr" rid="B90">90</xref>, <xref ref-type="bibr" rid="B108">108</xref>, <xref ref-type="bibr" rid="B109">109</xref>, <xref ref-type="bibr" rid="B111">111</xref>). The storage capacities of research groups can also directly impact groups&#x0027; data collection quantity and preferences (<xref ref-type="bibr" rid="B70">70</xref>). In addition, data collection is time intensive for research teams, especially if data needs to be edited to remove unwanted audio and transferred to another format (<xref ref-type="bibr" rid="B111">111</xref>). Kr&#x00F6;ger et al. mention that there is an imbalance in the availability of voice and speech data between private companies, whose data collection is often &#x201C;technically authorized but not actively consented&#x201D; (<xref ref-type="bibr" rid="B82">82</xref>), and publicly funded research teams, which rely on available datasets or who have to gather data themselves (<xref ref-type="bibr" rid="B91">91</xref>).</p>
<p>Several articles identified the small amount of training data as one of the limitations of their studies or of other studies they reviewed (<xref ref-type="bibr" rid="B87">87</xref>, <xref ref-type="bibr" rid="B93">93</xref>, <xref ref-type="bibr" rid="B101">101</xref>, <xref ref-type="bibr" rid="B105">105</xref>, <xref ref-type="bibr" rid="B107">107</xref>). One author also mentions that some studies even publish without the presence of a control group (<xref ref-type="bibr" rid="B101">101</xref>). There is also a lack of longitudinal data on voice and speech that include data from acute illness and beyond (<xref ref-type="bibr" rid="B93">93</xref>). The lack of availability of large-scale studies means that the switch from feasibility studies to large-scale development of voice biomarkers will be hard (<xref ref-type="bibr" rid="B93">93</xref>).</p>
</sec>
<sec id="s3b2c3"><label>3.2.2.3.3</label><title>Validation of Data</title>
<p>There are concerns about validating the results of voice and speech analysis, raising questions about the clinical introduction of technologies based on voice and speech biomarkers. The ability to validate results from multiple populations, in multiple settings, and with a good enough reliability will be key to introducing the use of voice and speech biomarkers into clinical care (<xref ref-type="bibr" rid="B70">70</xref>, <xref ref-type="bibr" rid="B75">75</xref>, <xref ref-type="bibr" rid="B91">91</xref>, <xref ref-type="bibr" rid="B94">94</xref>, <xref ref-type="bibr" rid="B109">109</xref>, <xref ref-type="bibr" rid="B111">111</xref>, <xref ref-type="bibr" rid="B112">112</xref>). Moreover, some authors question the validity of voice and speech analysis that is detached from the person, mentioning the false interpretations that can happen once the voice and speech data are taken out of context or analyzed without some critical background information on that person (<xref ref-type="bibr" rid="B93">93</xref>, <xref ref-type="bibr" rid="B113">113</xref>). Finally, various studies mention the need to validate the results from vocal and speech biomarkers with &#x201C;gold standards&#x201D; (<xref ref-type="bibr" rid="B27">27</xref>) or other biomarkers (<xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B76">76</xref>, <xref ref-type="bibr" rid="B77">77</xref>, <xref ref-type="bibr" rid="B79">79</xref>, <xref ref-type="bibr" rid="B94">94</xref>, <xref ref-type="bibr" rid="B109">109</xref>, <xref ref-type="bibr" rid="B114">114</xref>, <xref ref-type="bibr" rid="B115">115</xref>). Without such standards, the switch from feasibility studies to large-scale development of voice biomarkers will not happen (<xref ref-type="bibr" rid="B27">27</xref>). 
Proper validation of diverse samples and datasets will be essential to scale-up voice and speech biomarker analysis (<xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B65">65</xref>, <xref ref-type="bibr" rid="B69">69</xref>, <xref ref-type="bibr" rid="B87">87</xref>, <xref ref-type="bibr" rid="B111">111</xref>, <xref ref-type="bibr" rid="B114">114</xref>). This will mean validating results in different conditions (<xref ref-type="bibr" rid="B72">72</xref>, <xref ref-type="bibr" rid="B79">79</xref>), but also for different populations and sub-populations (<xref ref-type="bibr" rid="B69">69</xref>, <xref ref-type="bibr" rid="B108">108</xref>).</p>
</sec>
</sec>
<sec id="s3b2d"><label>3.2.2.4</label><title>Respect for People</title>
<p>Voice and speech data collection can demand that people perform certain tasks like breathing, reading texts, talking about a subject for a couple of minutes, etc. (<xref ref-type="bibr" rid="B82">82</xref>). For some conditions targeted by research on voice and speech biomarkers, those tasks can prove to be difficult. For example, Petti et al. (<xref ref-type="bibr" rid="B75">75</xref>) mention that for individuals with Alzheimer&#x0027;s disease, some tasks can be humiliating and can cause distress, resulting in frustration and anger (<xref ref-type="bibr" rid="B75">75</xref>).</p>
</sec>
</sec>
</sec>
</sec>
<sec id="s3c"><label>3.3</label><title>Social implications</title>
<sec id="s3c1"><label>3.3.1</label><title>Conventional implications</title>
<sec id="s3c1a"><label>3.3.1.1</label><title>Bias and lack of diversity</title>
<p>Bias in datasets can contribute to unfair, unbalanced, and even discriminatory studies and analysis of voice and speech biomarkers (<xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B65">65</xref>, <xref ref-type="bibr" rid="B68">68</xref>, <xref ref-type="bibr" rid="B72">72</xref>, <xref ref-type="bibr" rid="B76">76</xref>, <xref ref-type="bibr" rid="B83">83</xref>, <xref ref-type="bibr" rid="B85">85</xref>, <xref ref-type="bibr" rid="B98">98</xref>, <xref ref-type="bibr" rid="B100">100</xref>, <xref ref-type="bibr" rid="B102">102</xref>, <xref ref-type="bibr" rid="B107">107</xref>, <xref ref-type="bibr" rid="B114">114</xref>). Biased datasets may not adequately represent the diversity of human emotions across cultures, genders, and age groups (<xref ref-type="bibr" rid="B88">88</xref>, <xref ref-type="bibr" rid="B103">103</xref>, <xref ref-type="bibr" rid="B116">116</xref>). Given that AI and ML algorithms are susceptible to &#x201C;learning&#x201D; biases present in the training data, it is imperative to tackle these considerations early on in their development, since all the previously mentioned implications can be perpetuated once an algorithm is integrated into clinical care (<xref ref-type="bibr" rid="B6">6</xref>, <xref ref-type="bibr" rid="B46">46</xref>, <xref ref-type="bibr" rid="B65">65</xref>, <xref ref-type="bibr" rid="B82">82</xref>, <xref ref-type="bibr" rid="B85">85</xref>, <xref ref-type="bibr" rid="B86">86</xref>, <xref ref-type="bibr" rid="B89">89</xref>, <xref ref-type="bibr" rid="B93">93</xref>, <xref ref-type="bibr" rid="B100">100</xref>, <xref ref-type="bibr" rid="B108">108</xref>, <xref ref-type="bibr" rid="B116">116</xref>, <xref ref-type="bibr" rid="B117">117</xref>).</p>
<p>Bias mitigation, both in the training datasets and in the validation datasets is essential (<xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B46">46</xref>, <xref ref-type="bibr" rid="B82">82</xref>, <xref ref-type="bibr" rid="B83">83</xref>, <xref ref-type="bibr" rid="B85">85</xref>, <xref ref-type="bibr" rid="B88">88</xref>, <xref ref-type="bibr" rid="B92">92</xref>, <xref ref-type="bibr" rid="B94">94</xref>, <xref ref-type="bibr" rid="B103">103</xref>, <xref ref-type="bibr" rid="B104">104</xref>, <xref ref-type="bibr" rid="B107">107</xref>, <xref ref-type="bibr" rid="B116">116</xref>, <xref ref-type="bibr" rid="B118">118</xref>&#x2013;<xref ref-type="bibr" rid="B121">121</xref>). This type of mitigation will require the involvement of diverse stakeholders to be able to understand and consider their different needs and contexts (<xref ref-type="bibr" rid="B82">82</xref>, <xref ref-type="bibr" rid="B118">118</xref>).</p>
</sec>
<sec id="s3c1b"><label>3.3.1.2</label><title>Discrimination</title>
<p>One paper in this review also discussed how the use of &#x201C;big data&#x201D; in healthcare could enable discrimination and predatory practices. While this is not specific to voice and speech, they do mention how this could disproportionately affect some individuals with mental health conditions, one category of condition that is extensively researched in the voice biomarkers field (<xref ref-type="bibr" rid="B72">72</xref>). The accumulation of emotional data about a person could lead to unintended consequences, like emotional profiling, which could also lead to discrimination (<xref ref-type="bibr" rid="B68">68</xref>).</p>
</sec>
</sec>
<sec id="s3c2"><label>3.3.2</label><title>Modality-specific implications</title>
<sec id="s3c2a"><label>3.3.2.1</label><title>Bias and lack of diversity</title>
<p>Although the analysis of voice and speech biomarkers using AI is potentially a way to capture a noninvasive (<xref ref-type="bibr" rid="B69">69</xref>, <xref ref-type="bibr" rid="B75">75</xref>, <xref ref-type="bibr" rid="B77">77</xref>, <xref ref-type="bibr" rid="B93">93</xref>, <xref ref-type="bibr" rid="B98">98</xref>, <xref ref-type="bibr" rid="B99">99</xref>), relatively inexpensive (<xref ref-type="bibr" rid="B93">93</xref>, <xref ref-type="bibr" rid="B99">99</xref>, <xref ref-type="bibr" rid="B101">101</xref>, <xref ref-type="bibr" rid="B106">106</xref>) snapshot of cognition, tissue integrity, and motor function, issues arise related to bias and representation in datasets and algorithms. &#x201C;Typical adult speakers&#x201D; (i.e., people without pathological speech, adults, people not in minority communities) appear to be overrepresented in databases (<xref ref-type="bibr" rid="B92">92</xref>), and gender and racial disparities remain an unexplored area in voice and speech research and technology development (<xref ref-type="bibr" rid="B46">46</xref>). Additionally, when research projects look specifically at gender representation in data, they find that men are disproportionately represented (<xref ref-type="bibr" rid="B86">86</xref>). Additionally, to be used on a large scale, voice and speech data must be linked with sociodemographic information about a person, which could increase pre-existing types of discrimination (<xref ref-type="bibr" rid="B27">27</xref>).</p>
<p>Increasing the size (<xref ref-type="bibr" rid="B89">89</xref>) and diversity of datasets is often one of the suggested solutions to account for biases in datasets (<xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B122">122</xref>). However, doing so for voice and speech is a challenge as diversity in voice and speech is difficult to determine and measure. Diversity can be defined demographically, referring to gender, age, sex, ethnic origin, place of birth, and/or first language (<xref ref-type="bibr" rid="B102">102</xref>, <xref ref-type="bibr" rid="B107">107</xref>). Alternatively, it can be defined by the type of disease or condition, or by the stage in their development. Regarding speech and voice, diversity can also refer to the range of languages, dialects, and accents or the type of data (free speech, written text read, breath work, common sounds, etc.). Even within one language, there can be some linguistic variation, or &#x201C;orderly hierarchy,&#x201D; that can be critical to understanding one&#x0027;s speech. And while speech datasets usually specify languages spoken, more granularity in linguistic categorization is needed (<xref ref-type="bibr" rid="B107">107</xref>). There is also diversity in the type of recording device (professional microphone vs. phone microphone vs. ambient recording, presence of background noise, etc.), and this technical diversity can directly impact the validity and quality of analysis (<xref ref-type="bibr" rid="B107">107</xref>). However, because voice and speech are often related to language, detection of native language can represent personal information that has the potential to be misused to detect and discriminate against minorities (<xref ref-type="bibr" rid="B91">91</xref>). Moreover, there also seems to be some misperceived understanding that voice and speech samples are an unbiased form of data (<xref ref-type="bibr" rid="B116">116</xref>).</p>
</sec>
<sec id="s3c2b"><label>3.3.2.2</label><title>Discrimination</title>
<p>Although discrimination in data collection is an ongoing consideration and topic of research, Anglade et al. (<xref ref-type="bibr" rid="B118">118</xref>) specifically mention that people with communication disorders like aphasia, a condition that could benefit greatly from the use of voice and speech biomarkers, are often excluded from research because of the methodological or ethical challenges or adaptations that can come with including those communities in research (<xref ref-type="bibr" rid="B118">118</xref>). This consideration can also include individuals with cognitive impairment or people with acute psychiatric conditions (<xref ref-type="bibr" rid="B96">96</xref>). Finally, and most importantly, Dikaios et al. (<xref ref-type="bibr" rid="B93">93</xref>) mention that few studies on speech analysis acknowledge sources of bias and show any attempt to control them, hinting that much work needs to be done to assess and address those issues.</p>
</sec>
<sec id="s3c2c"><label>3.3.2.3</label><title>Exploitation and manipulation</title>
<p>One article also delves deeper into the dangers of voice and speech data access and sharing, especially regarding data breaches (<xref ref-type="bibr" rid="B94">94</xref>). Voice forgery and doxing are of particular concern in recent times, but it is not out of the realm of possibility that recordings could be used in prosecution or even divorce proceedings (<xref ref-type="bibr" rid="B92">92</xref>).</p>
</sec>
</sec>
<sec id="s3d"><label>3.4</label><title>Legal implications</title>
<sec id="s3d1"><label>3.4.1</label><title>Conventional implications</title>
<p>There are a number of legal implications surrounding voice and speech biomarker data that are consistent with those reported in the literature about other types of health data. Articles also mention how the use of voice biomarkers could lead to some legal implications.</p>
<sec id="s3d1a"><label>3.4.1.1</label><title>Compliance across multiple jurisdictions</title>
<p>Legal implications can arise from the laws and regulations in place where voice and speech data is collected and where it is stored, and these are thus important for researchers to consider when planning and implementing research projects. This process is sometimes complicated when local regulations from the city where the participant is residing differ from those of the province, state, or even country, or when participants move from one place to another with the device that captures their voice and speech. Furthermore, local, state, and national regulations regarding the collection and storage of data can conflict. For example, Casillas &#x0026; Cristia (<xref ref-type="bibr" rid="B88">88</xref>) give the example of a local regulation that would ban the recording of data in a specific environment (like supermarkets or shops), and where state or national regulations consider the recording of this type of data in this environment as public (<xref ref-type="bibr" rid="B88">88</xref>). Some concerns have also been raised around the ownership of the data gathered in research, and if it belongs to the researchers or the participants in research (<xref ref-type="bibr" rid="B81">81</xref>).</p>
<p>Another way to mitigate bias and ensure diversity is to facilitate the sharing of voice and speech data between researchers (<xref ref-type="bibr" rid="B27">27</xref>). However, due to legislative and regulatory discrepancies surrounding voice and speech data and its dissemination, data sharing between research teams is difficult. Developing policies that facilitate the <italic>safe</italic> sharing of voice and speech data is of utmost importance to ensure robust AI and ML analysis of voice and speech biomarkers (<xref ref-type="bibr" rid="B82">82</xref>, <xref ref-type="bibr" rid="B87">87</xref>).</p>
</sec>
<sec id="s3d1b"><label>3.4.1.2</label><title>Safeguarding participants in data-driven trials</title>
<p>Additionally, one article highlighted concerns about the regulatory implications for research participants, particularly in cases where private companies conduct clinical trials using digital samples (like voice and speech) rather than biological ones. Unlike university-based research teams that receive federal funding and must comply with the Common Rule, these private companies are not necessarily subject to FDA oversight, the Common Rule, or other federal regulations (<xref ref-type="bibr" rid="B120">120</xref>). The article further explores this issue, noting that there appears to be a general consensus within the research ethics community that extending the Common Rule to all human subjects research would be both impractical and difficult to enforce (<xref ref-type="bibr" rid="B120">120</xref>).</p>
</sec>
<sec id="s3d1c"><label>3.4.1.3</label><title>Data ownership</title>
<p>Data ownership is a common consideration in research and clinical care involving data collection or the use of personal data. However, for speech and voice data, it seems particularly important for Indigenous communities, where cultures and traditions are often carried out orally. In that sense, voice and speech can be related to people&#x0027;s rights and ownership over their cultural heritage, and merit special consideration (<xref ref-type="bibr" rid="B123">123</xref>). Finally, Woodward et al. mention that well-being apps lack basic privacy policies and can sell their users&#x0027; information to data brokers, and that international regulations have failed in their attempts to give control to citizens over their personal data (<xref ref-type="bibr" rid="B121">121</xref>).</p>
</sec>
</sec>
<sec id="s3d2"><label>3.4.2</label><title>Modality-specific implications</title>
<p>A vast majority of articles address how devices and teams that gather, store, and share voice data need to follow international and national regulatory frameworks. However, one article mentioned that it can be difficult to determine if the data falls under certain privacy laws (<xref ref-type="bibr" rid="B97">97</xref>). Also, as mentioned previously, there is no universal definition of privacy, and no clear understanding of when regulations apply to the capture, storage, and processing of speech data (<xref ref-type="bibr" rid="B71">71</xref>). This also extends to a lack of common understanding and different interpretations of laws and regulations regarding speech data and speech technologies between legal and technical communities (<xref ref-type="bibr" rid="B71">71</xref>). For these authors, privacy cannot be ensured without a universal definition and legal provisions establishing it as an enforceable right (<xref ref-type="bibr" rid="B71">71</xref>).</p>
<p>Using voice and speech as biomarkers leads to issues that straddle those of traditional biomarkers and challenges regarding digital health (<xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B91">91</xref>). Some questions have also been raised about the possibilities of allowing higher forms of data protection to some information that can be determined with the use of voice biomarkers, such as an individual&#x0027;s emotions and behavioral traits (<xref ref-type="bibr" rid="B76">76</xref>). There are also issues surrounding the privacy of voice data in civil proceedings (like divorce and litigation) or criminal cases, and questions about whether private information could be subpoenaed (<xref ref-type="bibr" rid="B70">70</xref>).</p>
</sec>
</sec>
</sec>
<sec id="s4" sec-type="discussion"><label>4</label><title>Discussion</title>
<p>This review examined the ethical, legal, and social implications (ELSI) of voice and speech data in healthcare, focusing on whether their use presents novel or exceptional challenges. Many of the identified ELSI are not unique to voice and speech data. These concerns have been extensively discussed in the context of biomedical technologies and other types of biomarkers (<xref ref-type="bibr" rid="B124">124</xref>&#x2013;<xref ref-type="bibr" rid="B127">127</xref>). However, the distinctive features of voice and speech require reinterpreting existing frameworks.</p>
<p><xref ref-type="table" rid="T2">Tables&#x00A0;2</xref>&#x2013;<xref ref-type="table" rid="T4">4</xref> present a side-by-side comparison to illustrate how the use of voice and speech data in healthcare both aligns with conventional implications and brings out modality-specific implications. Ethical concerns such as privacy, consent, and data quality persist, but take on new dimensions due to the technical and inferential characteristics of voice. Social implications reveal how structural biases are amplified through underrepresentation and linguistic exclusion, while also introducing risks of exploitation specific to audio data. Legally, although some regulatory gaps are familiar, voice data introduces new complications in classification, protection, and admissibility. The divergence in these columns shows where existing frameworks are strained and where entirely new considerations arise. This type of analysis is important because the unique characteristics of voice introduce challenges not captured by existing discussions of other health data. Integrating these concerns into broader ELSI conversations ensures that the distinct risks and responsibilities tied to voice are recognized, while maintaining coherence across debates about health data governance.</p>
<table-wrap id="T2" position="float"><label>Table&#x00A0;2</label>
<caption><p>Ethical implications&#x2014;conventional vs. modality-specific implications.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Ethical Implication</th>
<th valign="top" align="center">Conventional implications</th>
<th valign="top" align="center">Modality-specific implications</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Privacy and Security</td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>&#x2013;</label>
<p>Risks from continuous, passive collection of voice data. Storage and sharing of data in repositories raises risks of breaches, leaks, and re-identification</p></list-item>
<list-item><label>&#x2013;</label>
<p>Voice and speech recordings can be perceived as surveillance or intrusion by patients/caregivers. Potential for population-level surveillance.</p></list-item>
<list-item><label>&#x2013;</label>
<p>Tension between data quantity vs. privacy.</p></list-item>
</list></td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>&#x2013;</label>
<p>Risks from technical limits of privacy-preserving methods (encryption, anonymization, de-identification). Methods are also quickly outdated as new intrusion tech emerges.</p></list-item>
<list-item><label>&#x2013;</label>
<p>Possible to extract voice-inferred information from speech and voice recordings (age, gender, socioeconomic status)</p></list-item>
<list-item><label>&#x2013;</label>
<p>Privacy and security considerations can extend to third-party voices (bystanders) and different types of voice data (speech, breathing, laughing, etc.) with varying sensitivity.</p></list-item>
</list></td>
</tr>
<tr>
<td valign="top" align="left">Informed Consent</td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>&#x2013;</label>
<p>Difficulty ensuring participants understand risks of data leakage and long-term harms.</p></list-item>
<list-item><label>&#x2013;</label>
<p>General and technical literacy affects how participants interpret consent forms; need for accessible language.</p></list-item>
</list></td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>&#x2013;</label>
<p>Challenges in obtaining consent for continuous recording, including risks to non-consenting third parties.</p></list-item>
<list-item><label>&#x2013;</label>
<p>Practical impossibility of gathering consent from all affected individuals, and for future events participants may want to keep private.</p></list-item>
</list></td>
</tr>
<tr>
<td valign="top" align="left">Data Implications</td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>&#x2013;</label>
<p>Difficulties in sharing datasets between research groups due to format, task-specificity, and population differences.</p></list-item>
<list-item><label>&#x2013;</label>
<p>Data stored in inconvenient or incompatible formats; heterogeneity in recording conditions and criteria that can limit the ability to combine or re-use datasets across groups.</p></list-item>
<list-item><label>&#x2013;</label>
<p>Recruitment is difficult, especially for rare conditions; datasets often lack key demographic/comorbidity information.</p></list-item>
</list></td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>&#x2013;</label>
<p>Ensuring quality, standardization, and validation of voice/speech data for algorithm training and clinical application.</p></list-item>
<list-item><label>&#x2013;</label>
<p>Lack of standardized formats and protocols; device and environment variability; mobile devices add hardware/software noise.</p></list-item>
<list-item><label>&#x2013;</label>
<p>Concerns about false interpretations when data analyzed out of context; need to control for energy levels, timing, and recording conditions.</p></list-item>
</list></td>
</tr>
<tr>
<td valign="top" align="left">Respect for People</td>
<td valign="top" align="left"/>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>&#x2013;</label>
<p>Some recording tasks can be difficult or limited due to condition or energy levels.</p></list-item>
</list></td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="T3" position="float"><label>Table&#x00A0;3</label>
<caption><p>Social implications&#x2014;conventional vs. modality-specific implications.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Social Implication</th>
<th valign="top" align="center">Conventional implications</th>
<th valign="top" align="center">Modality-specific implications</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Bias and Diversity</td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>&#x2013;</label>
<p>Bias framed as a technical and social problem that must be addressed in development pipelines.</p></list-item>
<list-item><label>&#x2013;</label>
<p>Bias in datasets and algorithms risks leading to unfair, unbalanced, or discriminatory outcomes in clinical care.</p></list-item>
</list></td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>&#x2013;</label>
<p>Voice/speech samples are often wrongly perceived as unbiased, which hides structural inequities.</p></list-item>
<list-item><label>&#x2013;</label>
<p>Bias in voice data not only skews results but also risks misuse for discrimination against minorities (e.g., via language detection).</p></list-item>
<list-item><label>&#x2013;</label>
<p>Overrepresentation of &#x201C;typical adult speakers&#x201D; and men; underrepresentation of minorities, children, and pathological speech.</p></list-item>
</list></td>
</tr>
<tr>
<td valign="top" align="left">Discrimination</td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>&#x2013;</label>
<p>Risks of discrimination and predatory practices from large-scale health data use, especially for individuals with mental health conditions.</p></list-item>
</list></td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>&#x2013;</label>
<p>Exclusion of vulnerable groups (e.g., communication disorders, cognitive impairment, acute psychiatric conditions) from research.</p></list-item>
</list></td>
</tr>
<tr>
<td valign="top" align="left">Exploitation</td>
<td valign="top" align="left"/>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>&#x2013;</label>
<p>Risks associated with the nefarious use of voice and speech data in case of data leakage (doxing, misrepresentation of people), or use of voice and speech data in legal proceedings.</p></list-item>
</list></td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="T4" position="float"><label>Table&#x00A0;4</label>
<caption><p>Legal implications&#x2014;conventional vs. modality-specific implications.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Legal Implication</th>
<th valign="top" align="center">Conventional implications</th>
<th valign="top" align="center">Modality-specific implications</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Regulatory Complexity</td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>&#x2013;</label>
<p>Compliance with existing legal frameworks, emphasizing the need to account for overlapping and sometimes conflicting regulations across local, state, national, and international levels, especially when it comes to the sharing of voice and speech data.</p></list-item>
</list></td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>&#x2013;</label>
<p>Highlights the conceptual ambiguity of implications, such as privacy itself, stressing the absence of a universal definition and enforceable right, which limits the applicability of existing laws, as well as divergent interpretation between legal and technical communities.</p></list-item>
</list></td>
</tr>
<tr>
<td valign="top" align="left">Data Ownership</td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>&#x2013;</label>
<p>Extends ownership debates to voice as cultural heritage, particularly for Indigenous communities, linking privacy and ownership to collective rights and traditions.</p></list-item>
</list></td>
<td valign="top" align="left"/>
</tr>
<tr>
<td valign="top" align="left">Other Use of Data</td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>&#x2013;</label>
<p>Gaps in privacy policies of well-being apps and risks of user data being sold to third parties, emphasizing the need for compliance with existing regulations.</p></list-item>
</list></td>
<td valign="top" align="left">
<list list-type="simple">
<list-item><label>&#x2013;</label>
<p>Raises broader questions about whether voice data should be protected from legal compulsion and how privacy rights extend into judicial contexts.</p></list-item>
</list></td>
</tr>
</tbody>
</table>
</table-wrap>
<p>A key observation is the interdependence of ethical, legal, and social implications in this domain. For example, ensuring diversity in voice datasets is more complex than in other biomarker domains (<xref ref-type="bibr" rid="B27">27</xref>). Although diversity is essential for reducing bias, building representative voice datasets is resource-intensive and logistically difficult. This challenge is compounded by fragmented legal protections and inconsistent privacy regulations. Several articles question whether existing national and international regulations adequately govern voice biomarker development. While GDPR and HIPAA provide some safeguards to patients and system users (<xref ref-type="bibr" rid="B72">72</xref>, <xref ref-type="bibr" rid="B88">88</xref>, <xref ref-type="bibr" rid="B97">97</xref>, <xref ref-type="bibr" rid="B116">116</xref>, <xref ref-type="bibr" rid="B121">121</xref>), their limitations are evident, especially since many companies rely on them as default governance models (<xref ref-type="bibr" rid="B109">109</xref>). National and international regulatory frameworks must be adapted to address the specific risks posed by new biomarkers, including voice-based digital biomarkers (<xref ref-type="bibr" rid="B71">71</xref>, <xref ref-type="bibr" rid="B78">78</xref>). Regulatory responses must keep pace with rapid industry development and clarify legal responsibility (<xref ref-type="bibr" rid="B71">71</xref>, <xref ref-type="bibr" rid="B91">91</xref>). Beneath these regulatory challenges lie unresolved technical uncertainties. While outside the scope of this review, these gaps limit a comprehensive understanding. Nevertheless, the recurring mention of technical issues (such as inconsistent labeling and processing) shows their influence on both ELSI and mitigation strategies.</p>
<p>Addressing the intersection of ethical, legal, social, and technical implications requires transdisciplinary and cross-sector engagement. As shown, the reviewed literature spans voice technology, machine learning, AI, and digital health instrumentation. Development of AI tools must incorporate insights from adjacent disciplines and from stakeholders (e.g., patients, clinicians, and advocates) throughout the design pipeline and medical AI lifecycle (<xref ref-type="bibr" rid="B50">50</xref>, <xref ref-type="bibr" rid="B71">71</xref>, <xref ref-type="bibr" rid="B91">91</xref>). The field of bioethics can offer guidance in curating these discussions, ensuring stakeholder representation and participation, and generating context-specific guidelines that go beyond overarching principles and general claims about privacy, informed consent, and data security (<xref ref-type="bibr" rid="B50">50</xref>).</p>
<p>Few articles included in this review addressed the private sector, likely due to the methodological constraints of the search strategy and inclusion criteria. Those that did, however, tended to highlight the widening gap between academic researchers and private industry (<xref ref-type="bibr" rid="B82">82</xref>, <xref ref-type="bibr" rid="B91">91</xref>). This was mostly due, as reported in the results, to the scarcity of large, diverse, high-quality datasets (<xref ref-type="bibr" rid="B36">36</xref>, <xref ref-type="bibr" rid="B37">37</xref>, <xref ref-type="bibr" rid="B40">40</xref>, <xref ref-type="bibr" rid="B128">128</xref>, <xref ref-type="bibr" rid="B129">129</xref>). This limitation motivated initiatives like the Bridge2AI-Voice consortium (<xref ref-type="bibr" rid="B130">130</xref>), which aims to develop an ethically sourced database of thousands of voice samples linked to health metadata (<xref ref-type="bibr" rid="B34">34</xref>, <xref ref-type="bibr" rid="B49">49</xref>, <xref ref-type="bibr" rid="B131">131</xref>). Historically, large-scale data collection projects from healthcare institutions have contributed to advancing considerations for the responsible stewardship and governance of data (<xref ref-type="bibr" rid="B132">132</xref>, <xref ref-type="bibr" rid="B133">133</xref>), as well as providing viable solutions for the lack or inaccessibility of relevant data for research teams (<xref ref-type="bibr" rid="B134">134</xref>). But, as shown by (<xref ref-type="bibr" rid="B91">91</xref>), some researchers seek access to non-clinical voice recordings, such as those held by tech companies or in public repositories. These datasets, while abundant, often lack proper curation and metadata annotation needed for healthcare applications (<xref ref-type="bibr" rid="B135">135</xref>, <xref ref-type="bibr" rid="B136">136</xref>), something that appears to be lacking in private datasets (<xref ref-type="bibr" rid="B132">132</xref>). 
Collaboration between academic and industry sectors remains valuable, especially as private actors lead technological development in this domain (<xref ref-type="bibr" rid="B137">137</xref>). Yet, persistent communication gaps and contradictory incentive schemes across sectors impede alignment and slow field advancement (<xref ref-type="bibr" rid="B67">67</xref>).</p>
<p>The literature remains sparse on ELSI specific to voice and speech data in healthcare. Though the field is relatively new, having emerged in the early 2000s (<xref ref-type="bibr" rid="B138">138</xref>), recent years show a growing engagement with ethical, legal, and social dimensions. Despite this trend, empirical studies and in-depth conceptual analyses are still lacking. Most reviews mention ELSI superficially without probing underlying assumptions or stakeholder perspectives. This gap underscores the need for further research, particularly qualitative work exploring contributors&#x0027; views on privacy, consent, data usage, and trust. Understanding the motivations and concerns of data subjects is critical for ethical implementation.</p>
<p>Most publications on voice and speech data in healthcare originate from researchers based in the Global North (see <xref ref-type="fig" rid="F3">Figure&#x00A0;3</xref>). This reflects the geographic concentration of datasets, infrastructure, and funding, as well as restrictive international privacy regulations that complicate cross-border data sharing (<xref ref-type="bibr" rid="B71">71</xref>, <xref ref-type="bibr" rid="B88">88</xref>, <xref ref-type="bibr" rid="B97">97</xref>). These conditions limit international collaboration and prevent research teams in low- and middle-income countries (LMICs) from accessing or contributing to existing datasets. The same imbalance holds for industry: most companies developing voice AI tools are based in the West (<xref ref-type="bibr" rid="B50">50</xref>). While this geographic clustering may ease local industry&#x2013;academia collaboration, it undermines global inclusivity. LMICs often lack the technical, financial, and institutional capacity to generate or refine voice data (<xref ref-type="bibr" rid="B132">132</xref>, <xref ref-type="bibr" rid="B139">139</xref>). This exclusion means that a large portion of the global population is left out of both the development and the benefits of voice biomarker technologies. Moreover, research priorities and disease targets are shaped by Global North interests. The COVID-19 pandemic exemplifies this dynamic: while voice AI tools for cough analysis showed promise, their global utility was undermined by training data biases skewed toward wealthier regions (<xref ref-type="bibr" rid="B140">140</xref>, <xref ref-type="bibr" rid="B141">141</xref>). This case underscores how big data production mirrors (and reinforces) existing global inequities (<xref ref-type="bibr" rid="B142">142</xref>, <xref ref-type="bibr" rid="B143">143</xref>).</p>
<p>As mentioned, one of the central questions in this review is whether the use of voice and speech data in healthcare constitutes a form of exceptionalism. Rather than endorsing voice biomarker exceptionalism (i.e., a claim that this domain requires entirely new ethical frameworks), this paper argues for <italic>contextualism</italic>. Contextualism, as drawn from bioethical debates in genomics and neuroethics, rejects binary categories (exceptional vs. non-exceptional) and instead emphasizes that ethical implications vary according to the specific features of the data, its use context, and the sociotechnical systems surrounding it (<xref ref-type="bibr" rid="B144">144</xref>&#x2013;<xref ref-type="bibr" rid="B147">147</xref>). Voice data, in this framing, is neither wholly new nor entirely analogous to prior biomarker domains. It presents a &#x201C;fundamental duality&#x201D; (<xref ref-type="bibr" rid="B145">145</xref>), carrying specific risks (e.g., the inferential excesses of emotional AI, the misclassification of linguistic minorities, or the covert extraction of demographic attributes), but these risks are best addressed by adapting existing frameworks to new contexts, rather than discarding them. The ELSI mentioned in this review are not necessarily unique to voice and speech biomarkers, but they do take on different meanings and understandings and require new mitigation solutions given the unique characteristics of these biomarkers. The question is, however, interesting and even important to ask.</p>
<p>These contributions support a <italic>contextualist turn</italic>: ethical frameworks should not presume novelty but must remain responsive to the specific risks and affordances of voice AI. In this light, the development of voice and speech biomarkers should be understood as part of a broader evolution in digital health, comparable to past transitions seen in genomics or neuroimaging (<xref ref-type="bibr" rid="B148">148</xref>&#x2013;<xref ref-type="bibr" rid="B151">151</xref>). Lessons from those domains (about oversight gaps, translational bottlenecks, and stakeholder exclusion) are not just relevant, but necessary guides for the governance of voice data in health.</p>
</sec>
<sec id="s5"><label>5</label><title>Limitations</title>
<p>This review has several limitations that reflect both structural constraints in the field and methodological boundaries inherent to scoping reviews. First, although this review refers to &#x201C;voice and speech data&#x201D; as a combined category, the technical distinction between voice and speech remains imprecise in the reviewed literature. Voice generally refers to the acoustic signal produced by the vocal tract, while speech encompasses the linguistic content of that signal. These modalities are often treated interchangeably, but raise distinct ethical, legal, and social concerns depending on which is being collected or analyzed. This lack of definitional consensus limits the granularity with which ELSI concerns can be differentiated and analyzed (<xref ref-type="bibr" rid="B43">43</xref>). Second, private industry perspectives are significantly underrepresented in the reviewed material. While it is common for industry work to remain unpublished or appear in grey literature, the academic databases consulted (e.g., MEDLINE, Web of Science, EMBASE, IEEE) primarily index peer-reviewed sources. As a result, commercial practices and proprietary standards (especially around data acquisition, model training, and consent) are largely absent from this analysis. This is further compounded by the current state of the voice AI health-tech sector, which offers limited disclosure and transparency on its public-facing websites about its policies (<xref ref-type="bibr" rid="B152">152</xref>) and its technology (<xref ref-type="bibr" rid="B153">153</xref>). Third, while many included articles mention ELSI, their engagement is often brief or speculative rather than empirical or conceptually grounded. This limits the capacity of the review to draw out structured, well-theorized distinctions across domains, particularly in areas such as incidental findings, third-party data capture, or intergenerational data ethics. 
Fourth, as a scoping review, this article maps the breadth of existing literature rather than evaluating its quality or methodological robustness. This is appropriate for identifying gaps and setting research agendas but limits the evidentiary strength for normative claims or policy recommendations.</p>
</sec>
<sec id="s6" sec-type="conclusions"><label>6</label><title>Conclusion</title>
<p>To our knowledge, this is the first scoping review examining the ELSI associated with the collection and use of voice and speech data in healthcare. Reviewing literature is an important initial step in conducting ethical foresight. It allows researchers to identify issues and implications that are under consideration, uncover those which are not yet acknowledged, and highlight those for which further attention is required. The emergence of voice and speech-based biomarkers reframes how voice is understood; not only as expression, but as a data source conveying cognitive, physiological, and affective information. Voice and speech have long served as instruments of identity, expression, and social interaction. They are now increasingly central not only to human communication, but also to human-machine interaction. Voice also operates as a metaphor for representation, agency, and political visibility (<xref ref-type="bibr" rid="B154">154</xref>). With the advent of voice biomarkers, voice is no longer just symbolic; it is also a computational object for encoded cognitive, neuromuscular, and physiological signals (<xref ref-type="bibr" rid="B155">155</xref>, <xref ref-type="bibr" rid="B156">156</xref>).</p>
<p>This review also identifies several areas where additional research and policy development are urgently needed. In particular, the collection and use of voice and speech data in health contexts raise distinctive ethical concerns due to their capacity to reveal sensitive, inferential health information. The future trajectory of voice biomarker research remains uncertain, especially given the difficulty of building datasets that are trustworthy, clinically meaningful, and shareable across contexts. This review aims to support the ethical development, deployment, and governance of voice and speech data practices in healthcare and related domains.</p>
</sec>
</body>
<back>
<sec id="s7" sec-type="author-contributions"><title>Author contributions</title>
<p>M-FM: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Project administration, Validation, Visualization, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing. SB-G: Conceptualization, Data curation, Formal analysis, Investigation, Methodology, Validation, Writing &#x2013; review &#x0026; editing. HG: Formal analysis, Validation, Writing &#x2013; review &#x0026; editing. VR: Conceptualization, Funding acquisition, Methodology, Project administration, Resources, Supervision, Writing &#x2013; review &#x0026; editing. J-CB-P: Conceptualization, Formal analysis, Funding acquisition, Investigation, Project administration, Resources, Supervision, Validation, Writing &#x2013; review &#x0026; editing.</p>
</sec>
<sec id="s8"><title>Group members of the Bridge2AI-Voice consortium</title>
<p>University of South Florida, Tampa, FL, USA: Yael Bensoussan. Weill Cornell Medicine, New York, NY, USA: Olivier Elemento. Weill Cornell Medicine, New York, NY, USA: Anais Rameau. Weill Cornell Medicine, New York, NY, USA: Alexandros Sigaras. Massachusetts Institute of Technology, Boston, MA, USA: Satrajit Ghosh. Vanderbilt University Medical Center, Nashville, TN, USA: Maria Powell. University of Montreal, Montreal, Quebec, Canada: Vardit Ravitsky. Simon Fraser University, Burnaby, BC, Canada: Jean Christophe Belisle-Pipon. Oregon Health &#x0026; Science University, Portland, OR, USA: David Dorr. Washington University in St. Louis, St. Louis, MO, USA: Phillip Payne. University of Toronto, Toronto, Ontario, Canada: Alistair Johnson. University of South Florida, Tampa, FL, USA: Ruth Bahr. University of Florida, Gainesville, FL, USA: Donald Bolser. Dalhousie University, Toronto, ON, Canada: Frank Rudzicz. Mount Sinai Hospital, Sinai Health, University of Toronto, Toronto, ON, Canada: Jordan Lerner-Ellis. Boston Children&#x2019;s Hospital, Boston, MA, USA: Kathy Jenkins. University of Central Florida, Orlando, FL, USA: Shaheen Awan. University of South Florida, Tampa, FL, USA: Micah Boyer. Oregon Health &#x0026; Science University, Portland, OR, USA: William Hersh. Washington University in St. Louis, St. Louis, MO, USA: Andrea Krussel. Oregon Health &#x0026; Science University, Portland, OR, USA: Steven Bedrick. UT Health, Houston, TX, USA: Toufeeq Ahmed Syed. University of South Florida, Tampa, FL, USA: Jamie Toghranegar. University of South Florida, Tampa, FL, USA: James Anibal. New York, NY, USA: Duncan Sutherland. University of South Florida, Tampa, FL, USA: Enrique Diaz-Ocampo. University of South Florida, Tampa, FL, USA: Elizabeth Silberhoz. Boston Children&#x2019;s Hospital, Boston, MA, USA: John Costello. Vanderbilt University Medical Center, Nashville, TN, USA: Alexander Gelbard. Vanderbilt University Medical Center, Nashville, TN, USA: Kimberly Vinson. 
University of South Florida, Tampa, FL, USA: Tempestt Neal. Mount Sinai Health, Toronto, ON, Canada: Lochana Jayachandran. The Hospital for Sick Children, Toronto, ON, Canada: Evan Ng. Mount Sinai Health, Toronto, ON, Canada: Selina Casalino. University of South Florida, Tampa, FL, USA: Yassmeen Abdel-Aty. University of South Florida, Tampa, FL, USA: Karim Hanna. University of South Florida, Tampa, FL, USA: Theresa Zesiewicz. Florida Atlantic University, Boca Raton, FL, USA: Elijah Moothedan. University of South Florida, Tampa, FL, USA: Emily Evangelista. Vanderbilt University Medical Center, Nashville, TN, USA: Samantha Salvi Cruz. Weill Cornell Medicine, New York, NY, USA: Robin Zhao. University of South Florida, Tampa, FL, USA: Mohamed Ebraheem. University of South Florida, Tampa, FL, USA: Karlee Newberry. University of South Florida, Tampa, FL, USA: Iris De Santiago. University of South Florida, Tampa, FL, USA: Ellie Eiseman. University of South Florida, Tampa, FL, USA: JM Rahman. Boston Children&#x2019;s Hospital, Boston, MA, USA: Stacy Jo. Hospital for Sick Children, Toronto, ON, Canada: Anna Goldenberg.</p>
</sec>
<ack><title>Acknowledgments</title>
<p>The authors would like to thank Ashmita Grewal and Quang Huynh for their work and assistance during the earlier phases of this project. The authors are also grateful to Alden Blatter and Chloe Loewith for their valuable contributions during the revision phase of this work, particularly for their critical reading of the manuscript and insightful questions.</p>
</ack>
<sec id="s10" sec-type="COI-statement"><title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s11" sec-type="ai-statement"><title>Generative AI statement</title>
<p>The author(s) declared that generative AI was used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec id="s12" sec-type="disclaimer"><title>Publisher&#x0027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list><title>References</title>
<ref id="B1"><label>1.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Simonyan</surname> <given-names>K</given-names></name> <name><surname>Ackermann</surname> <given-names>H</given-names></name> <name><surname>Chang</surname> <given-names>EF</given-names></name> <name><surname>Greenlee</surname> <given-names>JD</given-names></name></person-group>. <article-title>New developments in understanding the complexity of human speech production</article-title>. <source>J Neurosci</source>. (<year>2016</year>) <volume>36</volume>(<issue>45</issue>):<fpage>11440</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.2424-16.2016</pub-id><pub-id pub-id-type="pmid">27911747</pub-id></mixed-citation></ref>
<ref id="B2"><label>2.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>Z</given-names></name></person-group>. <article-title>Mechanics of human voice production and control</article-title>. <source>J Acoust Soc Am</source>. (<year>2016</year>) <volume>140</volume>(<issue>4</issue>):<fpage>2614</fpage>. <pub-id pub-id-type="doi">10.1121/1.4964509</pub-id><pub-id pub-id-type="pmid">27794319</pub-id></mixed-citation></ref>
<ref id="B3"><label>3.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Casper</surname> <given-names>JK</given-names></name> <name><surname>Leonard</surname> <given-names>R</given-names></name></person-group>. <source>Understanding Voice Problems: A Physiological Perspective for Diagnosis and Treatment</source>. <publisher-loc>Philadelphia</publisher-loc>: <publisher-name>Lippincott Williams &#x0026; Wilkins</publisher-name> (<year>2006</year>). p. <fpage>528</fpage>.</mixed-citation></ref>
<ref id="B4"><label>4.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kent</surname> <given-names>RD</given-names></name></person-group>. <article-title>The uniqueness of speech among motor systems</article-title>. <source>Clin Linguist Phon</source>. (<year>2004</year>) <volume>18</volume>(<issue>6&#x2013;8</issue>):<fpage>495</fpage>&#x2013;<lpage>505</lpage>. <pub-id pub-id-type="doi">10.1080/02699200410001703600</pub-id><pub-id pub-id-type="pmid">15573486</pub-id></mixed-citation></ref>
<ref id="B5"><label>5.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lin</surname> <given-names>H</given-names></name> <name><surname>Karjadi</surname> <given-names>C</given-names></name> <name><surname>Ang</surname> <given-names>TFA</given-names></name> <name><surname>Prajakta</surname> <given-names>J</given-names></name> <name><surname>McManus</surname> <given-names>C</given-names></name> <name><surname>Alhanai</surname> <given-names>TW</given-names></name><etal/></person-group> <article-title>Identification of digital voice biomarkers for cognitive health</article-title>. <source>Explor Med</source>. (<year>2020</year>) <volume>1</volume>:<fpage>406</fpage>&#x2013;<lpage>17</lpage>. <pub-id pub-id-type="doi">10.37349/emed.2020.00028</pub-id><pub-id pub-id-type="pmid">33665648</pub-id></mixed-citation></ref>
<ref id="B6"><label>6.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Schuller</surname> <given-names>B</given-names></name></person-group>. <article-title>Voice and speech analysis in search of states and traits</article-title>. In: <person-group person-group-type="editor"><name><surname>Salah</surname> <given-names>AA</given-names></name> <name><surname>Gevers</surname> <given-names>T</given-names></name></person-group>, editors. <source>Computer Analysis of Human Behavior</source>. <publisher-loc>London</publisher-loc>: <publisher-name>Springer</publisher-name> (<year>2011</year>). p. <fpage>227</fpage>&#x2013;<lpage>53</lpage>. <comment>Available online at:</comment> <pub-id pub-id-type="doi">10.1007/978-0-85729-994-9_9</pub-id> <comment>(Accessed October 24, 2024)</comment>.</mixed-citation></ref>
<ref id="B7"><label>7.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kamilo&#x011F;lu</surname> <given-names>RG</given-names></name> <name><surname>Sauter</surname> <given-names>DA</given-names></name></person-group>. <article-title>Voice production and perception</article-title>. <source>Oxf Res Encycl Psychol</source>. (<year>2021</year>). <pub-id pub-id-type="doi">10.1093/acrefore/9780190236557.013.766</pub-id> <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://oxfordre.com/psychology/display/10.1093/acrefore/9780190236557.001.0001/acrefore-9780190236557-e-766">https://oxfordre.com/psychology/display/10.1093/acrefore/9780190236557.001.0001/acrefore-9780190236557-e-766</ext-link> <comment>(Accessed August 29, 2024)</comment></mixed-citation></ref>
<ref id="B8"><label>8.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Pennock Speck</surname> <given-names>B</given-names></name></person-group>. <article-title>Voice and the construction of identity and meaning</article-title>. In: <person-group person-group-type="editor"><name><surname>Navarro</surname><given-names>I</given-names></name> <name><surname>Crespo</surname><given-names>NA</given-names></name></person-group>, editors. <source>In-roads of Language: Essays in English Studies</source>. <publisher-loc>Castell&#x00F3;n</publisher-loc>: <publisher-name>Publicacions de la Universitat Jaume I</publisher-name> (<year>2006</year>). p. <fpage>91</fpage>&#x2013;<lpage>102</lpage>.</mixed-citation></ref>
<ref id="B9"><label>9.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Carding</surname> <given-names>P</given-names></name> <name><surname>Mathieson</surname> <given-names>L</given-names></name></person-group>. <article-title>Voice and speech production</article-title>. In: <person-group person-group-type="editor"><name><surname>Watkinson</surname><given-names>J</given-names></name> <name><surname>Clarke</surname><given-names>R</given-names></name></person-group>, editors. <source>Scott-Brown&#x2019;s Otorhinolaryngology and Head and Neck Surgery.</source> <edition>8th ed.</edition> <publisher-loc>Boca Raton, FL</publisher-loc>: <publisher-name>CRC Press</publisher-name> (<year>2018</year>). p. <fpage>905</fpage>&#x2013;<lpage>10</lpage>.</mixed-citation></ref>
<ref id="B10"><label>10.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Redford</surname> <given-names>MA</given-names></name></person-group>. <article-title>Introduction</article-title>. In: <person-group person-group-type="editor"><name><surname>Redford</surname><given-names>MA</given-names></name></person-group>, editor. <source>The Handbook of Speech Production</source>. <publisher-loc>Hoboken</publisher-loc>: <publisher-name>John Wiley &#x0026; Sons, Ltd</publisher-name> (<year>2015</year>). p. <fpage>1</fpage>&#x2013;<lpage>10</lpage>. <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://onlinelibrary.wiley.com/doi/abs/10.1002/9781118584156.ch1">https://onlinelibrary.wiley.com/doi/abs/10.1002/9781118584156.ch1</ext-link> <comment>(Accessed May 12, 2024)</comment>.</mixed-citation></ref>
<ref id="B11"><label>11.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schlichter</surname> <given-names>A</given-names></name> <name><surname>Eidsheim</surname> <given-names>NS</given-names></name></person-group>. <article-title>Introduction: voice matters</article-title>. <source>Postmod Cult</source>. (<year>2014</year>) <volume>24</volume>(<issue>3</issue>). <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://muse.jhu.edu/pub/1/article/589565">https://muse.jhu.edu/pub/1/article/589565</ext-link> <comment>(Accessed May 12, 2024)</comment></mixed-citation></ref>
<ref id="B12"><label>12.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Popov</surname> <given-names>D</given-names></name></person-group>. <article-title>Speech personality, individuality and uniqueness of human voice</article-title>. In: <conf-name>Proceedings of Speech and Language 2019, 7th International Conference on Fundmental and Applied Aspects of Speech and Language</conf-name>. <publisher-loc>Belgrade</publisher-loc>: <publisher-name>Life Activities Advancement Center</publisher-name> (<year>2019</year>). p. <fpage>34</fpage>.</mixed-citation></ref>
<ref id="B13"><label>13.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Gonz&#x00E1;lez-Rodr&#x00ED;guez</surname> <given-names>J</given-names></name> <name><surname>Toledano</surname> <given-names>DT</given-names></name> <name><surname>Ortega-Garc&#x00ED;a</surname> <given-names>J</given-names></name></person-group>. <article-title>Voice biometrics</article-title>. In: <person-group person-group-type="editor"><name><surname>Jain</surname> <given-names>AK</given-names></name> <name><surname>Flynn</surname> <given-names>P</given-names></name> <name><surname>Ross</surname> <given-names>AA</given-names></name></person-group>, editors. <source>Handbook of Biometrics</source>. <edition>1st ed.</edition> <publisher-loc>New York, NY</publisher-loc>: <publisher-name>Springer US</publisher-name> (<year>2008</year>). p. <fpage>151</fpage>&#x2013;<lpage>70</lpage>.</mixed-citation></ref>
<ref id="B14"><label>14.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Boles</surname> <given-names>A</given-names></name> <name><surname>Rad</surname> <given-names>P</given-names></name></person-group>. <article-title>Voice biometrics: deep learning-based voiceprint authentication system</article-title>. <conf-name>2017 12th System of Systems Engineering Conference (SoSE)</conf-name> (<year>2017</year>). p. <fpage>1</fpage>&#x2013;<lpage>6</lpage> <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://ieeexplore.ieee.org/abstract/document/7994971">https://ieeexplore.ieee.org/abstract/document/7994971</ext-link> <comment>(Accessed July 3, 2024)</comment>.</mixed-citation></ref>
<ref id="B15"><label>15.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Tandogan</surname> <given-names>SE</given-names></name> <name><surname>Senear</surname> <given-names>HT</given-names></name> <name><surname>Tavli</surname> <given-names>B</given-names></name></person-group>. <article-title>Towards measuring uniqueness of human voice</article-title>. <conf-name>2017 IEEE Workshop on Information Forensics and Security (WIFS)</conf-name> (<year>2017</year>). p. <fpage>1</fpage>&#x2013;<lpage>6</lpage>. <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://ieeexplore.ieee.org/abstract/document/8267666?casa_token=F5APTFe2TY4AAAAA:HP8XkJ1fWaAHb2iDRt18NU5L0d9oQBM2qjjyplormlUHs9hG-Cq298K2CY2o8yw6BLZG-SVejpA">https://ieeexplore.ieee.org/abstract/document/8267666?casa_token&#x003D;F5APTFe2TY4AAAAA:HP8XkJ1fWaAHb2iDRt18NU5L0d9oQBM2qjjyplormlUHs9hG-Cq298K2CY2o8yw6BLZG-SVejpA</ext-link> <comment>(Accessed October 24, 2024)</comment>.</mixed-citation></ref>
<ref id="B16"><label>16.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dahia</surname> <given-names>G</given-names></name> <name><surname>Jesus</surname> <given-names>L</given-names></name> <name><surname>Segundo</surname> <given-names>MP</given-names></name></person-group>. <article-title>Continuous authentication using biometrics: an advanced review</article-title>. <source>Wiley Interdiscip Rev Data Min Knowl Discov</source>. (<year>2020</year>) <volume>10</volume>(<issue>4</issue>):<fpage>e1365</fpage>. <pub-id pub-id-type="doi">10.1002/widm.1365</pub-id></mixed-citation></ref>
<ref id="B17"><label>17.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>H</given-names></name> <name><surname>Xu</surname> <given-names>C</given-names></name> <name><surname>Rathore</surname> <given-names>AS</given-names></name> <name><surname>Li</surname> <given-names>Z</given-names></name> <name><surname>Zhang</surname> <given-names>H</given-names></name> <name><surname>Song</surname> <given-names>C</given-names></name><etal/></person-group> <article-title>Vocalprint: exploring a resilient and secure voice authentication via mmWave biometric interrogation</article-title>. In<italic>:</italic> <conf-name>Proceedings of the 18th Conference on Embedded Networked Sensor Systems</conf-name>. <publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name> (<year>2020</year>). p. <fpage>312</fpage>&#x2013;<lpage>25</lpage>. <comment>(SenSys &#x2018;20). Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://dl.acm.org/doi/10.1145/3384419.3430779">https://dl.acm.org/doi/10.1145/3384419.3430779</ext-link> <comment>(Accessed October 24, 2024)</comment>.</mixed-citation></ref>
<ref id="B18"><label>18.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Laput</surname> <given-names>G</given-names></name> <name><surname>Ahuja</surname> <given-names>K</given-names></name> <name><surname>Goel</surname> <given-names>M</given-names></name> <name><surname>Harrison</surname> <given-names>C.</given-names></name></person-group> <article-title>Ubicoustics: plug-and-play acoustic activity recognition</article-title>. In: <conf-name>Proceedings of the 31st Annual ACM Symposium on User Interface Software and Technology</conf-name>. <publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name> (<year>2018</year>). p. <fpage>213</fpage>&#x2013;<lpage>24</lpage>. <comment>(UIST &#x2018;18)</comment>. <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://dl.acm.org/doi/10.1145/3242587.3242609">https://dl.acm.org/doi/10.1145/3242587.3242609</ext-link> <comment>(Accessed October 24, 2024)</comment>.</mixed-citation></ref>
<ref id="B19"><label>19.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Berisha</surname> <given-names>V</given-names></name> <name><surname>Liss</surname> <given-names>JM</given-names></name></person-group>. <article-title>Responsible development of clinical speech AI: bridging the gap between clinical research and technology</article-title>. <source>Npj Digit Med</source>. (<year>2024</year>) <volume>7</volume>(<issue>1</issue>):<fpage>1</fpage>&#x2013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1038/s41746-024-01199-1</pub-id><pub-id pub-id-type="pmid">38172429</pub-id></mixed-citation></ref>
<ref id="B20"><label>20.</label><mixed-citation publication-type="book"><collab>FDA-NIH Biomarker Working Group</collab>. <source>BEST (Biomarkers, EndpointS, and Other Tools) Resource</source>. <publisher-loc>Silver Spring (MD)</publisher-loc>: <publisher-name>Food and Drug Administration (US)</publisher-name> (<year>2016</year>). <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="http://www.ncbi.nlm.nih.gov/books/NBK326791/">http://www.ncbi.nlm.nih.gov/books/NBK326791/</ext-link> <comment>(Accessed October 24, 2024)</comment>.</mixed-citation></ref>
<ref id="B21"><label>21.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Idrisoglu</surname> <given-names>A</given-names></name> <name><surname>Dallora</surname> <given-names>AL</given-names></name> <name><surname>Anderberg</surname> <given-names>P</given-names></name> <name><surname>Berglund</surname> <given-names>JS</given-names></name></person-group>. <article-title>Applied machine learning techniques to diagnose voice-affecting conditions and disorders: systematic literature review</article-title>. <source>J Med Internet Res</source>. (<year>2023</year>) <volume>25</volume>(<issue>1</issue>):<fpage>e46105</fpage>. <pub-id pub-id-type="doi">10.2196/46105</pub-id><pub-id pub-id-type="pmid">37467031</pub-id></mixed-citation></ref>
<ref id="B22"><label>22.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Motahari-Nezhad</surname> <given-names>H</given-names></name> <name><surname>Fgaier</surname> <given-names>M</given-names></name> <name><surname>Mahdi Abid</surname> <given-names>M</given-names></name> <name><surname>P&#x00E9;ntek</surname> <given-names>M</given-names></name> <name><surname>Gul&#x00E1;csi</surname> <given-names>L</given-names></name> <name><surname>Zrubka</surname> <given-names>Z</given-names></name></person-group>. <article-title>Digital biomarker&#x2013;based studies: scoping review of systematic reviews</article-title>. <source>JMIR MHealth UHealth</source>. (<year>2022</year>) <volume>10</volume>(<issue>10</issue>):<fpage>e35722</fpage>. <pub-id pub-id-type="doi">10.2196/35722</pub-id><pub-id pub-id-type="pmid">36279171</pub-id></mixed-citation></ref>
<ref id="B23"><label>23.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Powell</surname> <given-names>D</given-names></name></person-group>. <article-title>Walk, talk, think, see and feel: harnessing the power of digital biomarkers in healthcare</article-title>. <source>Npj Digit Med</source>. (<year>2024</year>) <volume>7</volume>(<issue>1</issue>):<fpage>1</fpage>&#x2013;<lpage>3</lpage>. <pub-id pub-id-type="doi">10.1038/s41746-024-01023-w</pub-id><pub-id pub-id-type="pmid">38172429</pub-id></mixed-citation></ref>
<ref id="B24"><label>24.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Au</surname> <given-names>R</given-names></name> <name><surname>Kolachalama</surname> <given-names>VB</given-names></name> <name><surname>Paschalidis</surname> <given-names>IC</given-names></name></person-group>. <article-title>Redefining and validating digital biomarkers as fluid, dynamic multi-dimensional digital signal patterns</article-title>. <source>Front Digit Health</source>. (<year>2021</year>) <volume>3</volume>:<fpage>751629</fpage>. <pub-id pub-id-type="doi">10.3389/fdgth.2021.751629</pub-id><pub-id pub-id-type="pmid">35146485</pub-id></mixed-citation></ref>
<ref id="B25"><label>25.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Babrak</surname> <given-names>LM</given-names></name> <name><surname>Menetski</surname> <given-names>J</given-names></name> <name><surname>Rebhan</surname> <given-names>M</given-names></name> <name><surname>Nisato</surname> <given-names>G</given-names></name> <name><surname>Zinggeler</surname> <given-names>M</given-names></name> <name><surname>Brasier</surname> <given-names>N</given-names></name><etal/></person-group> <article-title>Traditional and digital biomarkers: two worlds apart?</article-title> <source>Digit Biomark</source>. (<year>2019</year>) <volume>3</volume>(<issue>2</issue>):<fpage>92</fpage>&#x2013;<lpage>102</lpage>. <pub-id pub-id-type="doi">10.1159/000502000</pub-id><pub-id pub-id-type="pmid">32095769</pub-id></mixed-citation></ref>
<ref id="B26"><label>26.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mulinari</surname> <given-names>S</given-names></name></person-group>. <article-title>Short-circuiting biology: digital phenotypes, digital biomarkers, and shifting gazes in psychiatry</article-title>. <source>Big Data Soc</source>. (<year>2023</year>) <volume>10</volume>(<issue>1</issue>):<fpage>20539517221145680</fpage>. <pub-id pub-id-type="doi">10.1177/20539517221145680</pub-id></mixed-citation></ref>
<ref id="B27"><label>27.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fagherazzi</surname> <given-names>G</given-names></name> <name><surname>Fischer</surname> <given-names>A</given-names></name> <name><surname>Ismael</surname> <given-names>M</given-names></name> <name><surname>Despotovic</surname> <given-names>V</given-names></name></person-group>. <article-title>Voice for health: the use of vocal biomarkers from research to clinical practice</article-title>. <source>Digit Biomark</source>. (<year>2021</year>) <volume>5</volume>(<issue>1</issue>):<fpage>78</fpage>&#x2013;<lpage>88</lpage>. <pub-id pub-id-type="doi">10.1159/000515346</pub-id><pub-id pub-id-type="pmid">34056518</pub-id></mixed-citation></ref>
<ref id="B28"><label>28.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Pizzimenti</surname> <given-names>M</given-names></name> <name><surname>Kalia</surname> <given-names>A</given-names></name> <name><surname>Toghranegar</surname> <given-names>JA</given-names></name> <name><surname>Ebraheem</surname> <given-names>M</given-names></name> <name><surname>Cummings</surname> <given-names>N</given-names></name> <name><surname>Ghosh</surname> <given-names>SS</given-names></name><etal/></person-group> <comment>Consensus-Based Definitions for Vocal Biomarkers: The International VOCAL Initiative</comment>. <comment>medRxiv</comment>. (<year>2025</year>). p. <fpage>2025.10.23.25338518</fpage>. <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://www.medrxiv.org/content/10.1101/2025.10.23.25338518v1">https://www.medrxiv.org/content/10.1101/2025.10.23.25338518v1</ext-link> <comment>(Accessed November 7, 2025)</comment>.</mixed-citation></ref>
<ref id="B29"><label>29.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Robin</surname> <given-names>J</given-names></name> <name><surname>Harrison</surname> <given-names>JE</given-names></name> <name><surname>Kaufman</surname> <given-names>LD</given-names></name> <name><surname>Rudzicz</surname> <given-names>F</given-names></name> <name><surname>Simpson</surname> <given-names>W</given-names></name> <name><surname>Yancheva</surname> <given-names>M</given-names></name></person-group>. <article-title>Evaluation of speech-based digital biomarkers: review and recommendations</article-title>. <source>Digit Biomark</source>. (<year>2020</year>) <volume>4</volume>(<issue>3</issue>):<fpage>99</fpage>&#x2013;<lpage>108</lpage>. <pub-id pub-id-type="doi">10.1159/000510820</pub-id><pub-id pub-id-type="pmid">33251474</pub-id></mixed-citation></ref>
<ref id="B30"><label>30.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Elb&#x00E9;ji</surname> <given-names>A</given-names></name> <name><surname>Zhang</surname> <given-names>L</given-names></name> <name><surname>Higa</surname> <given-names>E</given-names></name> <name><surname>Fischer</surname> <given-names>A</given-names></name> <name><surname>Despotovic</surname> <given-names>V</given-names></name> <name><surname>Nazarov</surname> <given-names>PV</given-names></name><etal/></person-group> <article-title>Vocal biomarker predicts fatigue in people with COVID-19: results from the prospective Predi-COVID cohort study</article-title>. <source>BMJ Open</source>. (<year>2022</year>) <volume>12</volume>(<issue>11</issue>):<fpage>e062463</fpage>. <pub-id pub-id-type="doi">10.1136/bmjopen-2022-062463</pub-id></mixed-citation></ref>
<ref id="B31"><label>31.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rosen-Lang</surname> <given-names>Y</given-names></name> <name><surname>Zoubi</surname> <given-names>S</given-names></name> <name><surname>Cialic</surname> <given-names>R</given-names></name> <name><surname>Orenstein</surname> <given-names>T</given-names></name></person-group>. <article-title>Using voice biomarkers for frailty classification</article-title>. <source>GeroScience</source>. (<year>2023</year>) <volume>46</volume>:<fpage>1175</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1007/s11357-023-00872-9</pub-id><pub-id pub-id-type="pmid">37480417</pub-id></mixed-citation></ref>
<ref id="B32"><label>32.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Greenwood</surname> <given-names>T</given-names></name> <name><surname>Nunes</surname> <given-names>N.</given-names></name></person-group> <comment>The coming revolution of voice-based digital biomarkers to diagnose and monitor disease</comment>. <comment>ZS</comment>. (<year>2022</year>). <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://www.zs.com/content/dam/pdfs/The-coming-revolution-of-voice-based-digital-biomarkers.pdf">https://www.zs.com/content/dam/pdfs/The-coming-revolution-of-voice-based-digital-biomarkers.pdf</ext-link> <comment>(Accessed November 17, 2025).</comment></mixed-citation></ref>
<ref id="B33"><label>33.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Al-Dewik</surname> <given-names>NI</given-names></name> <name><surname>Younes</surname> <given-names>SN</given-names></name> <name><surname>Essa</surname> <given-names>MM</given-names></name> <name><surname>Pathak</surname> <given-names>S</given-names></name> <name><surname>Qoronfleh</surname> <given-names>MW</given-names></name></person-group>. <article-title>Making biomarkers relevant to healthcare innovation and precision medicine</article-title>. <source>Processes</source>. (<year>2022</year>) <volume>10</volume>(<issue>6</issue>):<fpage>1107</fpage>. <pub-id pub-id-type="doi">10.3390/pr10061107</pub-id></mixed-citation></ref>
<ref id="B34"><label>34.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bensoussan</surname> <given-names>Y</given-names></name> <name><surname>Elemento</surname> <given-names>O</given-names></name> <name><surname>Rameau</surname> <given-names>A</given-names></name></person-group>. <article-title>Voice as an AI biomarker of health&#x2014;introducing audiomics</article-title>. <source>JAMA Otolaryngol Head Neck Surg</source>. (<year>2024</year>) <volume>150</volume>(<issue>4</issue>):<fpage>283</fpage>&#x2013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1001/jamaoto.2023.4807</pub-id></mixed-citation></ref>
<ref id="B35"><label>35.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Seyhan</surname> <given-names>AA</given-names></name> <name><surname>Carini</surname> <given-names>C</given-names></name></person-group>. <article-title>Are innovation and new technologies in precision medicine paving a new era in patients centric care?</article-title> <source>J Transl Med</source>. (<year>2019</year>) <volume>17</volume>(<issue>1</issue>):<fpage>114</fpage>. <pub-id pub-id-type="doi">10.1186/s12967-019-1864-9</pub-id><pub-id pub-id-type="pmid">30953518</pub-id></mixed-citation></ref>
<ref id="B36"><label>36.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ahmed</surname> <given-names>S</given-names></name> <name><surname>Haigh</surname> <given-names>AMF</given-names></name> <name><surname>de Jager</surname> <given-names>CA</given-names></name> <name><surname>Garrard</surname> <given-names>P</given-names></name></person-group>. <article-title>Connected speech as a marker of disease progression in autopsy-proven Alzheimer&#x2019;s disease</article-title>. <source>Brain J Neurol</source>. (<year>2013</year>) <volume>136</volume>(<issue>Pt 12</issue>):<fpage>3727</fpage>&#x2013;<lpage>37</lpage>. <pub-id pub-id-type="doi">10.1093/brain/awt269</pub-id></mixed-citation></ref>
<ref id="B37"><label>37.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tracy</surname> <given-names>JM</given-names></name> <name><surname>&#x00D6;zkanca</surname> <given-names>Y</given-names></name> <name><surname>Atkins</surname> <given-names>DC</given-names></name> <name><surname>Hosseini Ghomi</surname> <given-names>R</given-names></name></person-group>. <article-title>Investigating voice as a biomarker: deep phenotyping methods for early detection of Parkinson&#x2019;s disease</article-title>. <source>J Biomed Inform</source>. (<year>2020</year>) <volume>104</volume>:<fpage>103362</fpage>. <pub-id pub-id-type="doi">10.1016/j.jbi.2019.103362</pub-id><pub-id pub-id-type="pmid">31866434</pub-id></mixed-citation></ref>
<ref id="B38"><label>38.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Briend</surname> <given-names>F</given-names></name> <name><surname>David</surname> <given-names>C</given-names></name> <name><surname>Silleresi</surname> <given-names>S</given-names></name> <name><surname>Malvy</surname> <given-names>J</given-names></name> <name><surname>Ferr&#x00E9;</surname> <given-names>S</given-names></name> <name><surname>Latinus</surname> <given-names>M</given-names></name></person-group>. <article-title>Voice acoustics allow classifying autism spectrum disorder with high accuracy</article-title>. <source>Transl Psychiatry</source>. (<year>2023</year>) <volume>13</volume>(<issue>1</issue>):<fpage>1</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1038/s41398-023-02554-8</pub-id><pub-id pub-id-type="pmid">36596778</pub-id></mixed-citation></ref>
<ref id="B39"><label>39.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Adams</surname> <given-names>P</given-names></name> <name><surname>Rabbi</surname> <given-names>M</given-names></name> <name><surname>Rahman</surname> <given-names>T</given-names></name> <name><surname>Matthews</surname> <given-names>M</given-names></name> <name><surname>Voida</surname> <given-names>A</given-names></name> <name><surname>Gay</surname> <given-names>G</given-names></name><etal/></person-group> <article-title>Towards personal stress informatics: comparing minimally invasive techniques for measuring daily stress in the wild</article-title>. <conf-name>Proc&#x2014;PervasiveHealth 2014 8th Int Conf Pervasive Comput Technol Healthc</conf-name> (<year>2014</year>). p. <fpage>72</fpage>&#x2013;<lpage>9</lpage></mixed-citation></ref>
<ref id="B40"><label>40.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shin</surname> <given-names>D</given-names></name> <name><surname>Cho</surname> <given-names>WI</given-names></name> <name><surname>Park</surname> <given-names>CHK</given-names></name> <name><surname>Rhee</surname> <given-names>SJ</given-names></name> <name><surname>Kim</surname> <given-names>MJ</given-names></name> <name><surname>Lee</surname> <given-names>H</given-names></name><etal/></person-group> <article-title>Detection of minor and major depression through voice as a biomarker using machine learning</article-title>. <source>J Clin Med</source>. (<year>2021</year>) <volume>10</volume>(<issue>14</issue>):<fpage>3046</fpage>. <pub-id pub-id-type="doi">10.3390/jcm10143046</pub-id><pub-id pub-id-type="pmid">34300212</pub-id></mixed-citation></ref>
<ref id="B41"><label>41.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Naseer</surname> <given-names>A</given-names></name> <name><surname>Rani</surname> <given-names>M</given-names></name> <name><surname>Naz</surname> <given-names>S</given-names></name> <name><surname>Razzak</surname> <given-names>MI</given-names></name> <name><surname>Imran</surname> <given-names>M</given-names></name> <name><surname>Xu</surname> <given-names>G</given-names></name></person-group>. <article-title>Refining Parkinson&#x2019;s neurological disorder identification through deep transfer learning</article-title>. <source>Neural Comput Appl</source>. (<year>2020</year>) <volume>32</volume>(<issue>3</issue>):<fpage>839</fpage>&#x2013;<lpage>54</lpage>. <pub-id pub-id-type="doi">10.1007/s00521-019-04069-0</pub-id></mixed-citation></ref>
<ref id="B42"><label>42.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Raina</surname> <given-names>R</given-names></name> <name><surname>Jha</surname> <given-names>RK</given-names></name></person-group>. <article-title>Intelligent and interactive healthcare system (I2HS) using machine learning</article-title>. <source>IEEE Access</source>. (<year>2022</year>) <volume>10</volume>:<fpage>116402</fpage>&#x2013;<lpage>24</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2022.3197878</pub-id></mixed-citation></ref>
<ref id="B43"><label>43.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kalia</surname> <given-names>A</given-names></name> <name><surname>Boyer</surname> <given-names>M</given-names></name> <name><surname>Fagherazzi</surname> <given-names>G</given-names></name> <name><surname>B&#x00E9;lisle-Pipon</surname> <given-names>JC</given-names></name> <name><surname>Bensoussan</surname> <given-names>Y</given-names></name></person-group>. <article-title>Master protocols in vocal biomarker development to reduce variability and advance clinical precision: a narrative review</article-title>. <source>Front Digit Health</source>. (<year>2025</year>) <volume>7</volume>:<fpage>1619183</fpage>. <pub-id pub-id-type="doi">10.3389/fdgth.2025.1619183</pub-id><pub-id pub-id-type="pmid">40657648</pub-id></mixed-citation></ref>
<ref id="B44"><label>44.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>GS</given-names></name> <name><surname>Jovanovic</surname> <given-names>N</given-names></name> <name><surname>Sung</surname> <given-names>CK</given-names></name> <name><surname>Doyle</surname> <given-names>PC</given-names></name></person-group>. <article-title>A scoping review of artificial intelligence detection of voice pathology: challenges and opportunities</article-title>. <source>Otolaryngol Head Neck Surg</source>. (<year>2024</year>) <volume>171</volume>(<issue>3</issue>):<fpage>658</fpage>&#x2013;<lpage>66</lpage>. <pub-id pub-id-type="doi">10.1002/ohn.809</pub-id></mixed-citation></ref>
<ref id="B45"><label>45.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Bonastre</surname> <given-names>JF</given-names></name> <name><surname>Delgado</surname> <given-names>H</given-names></name> <name><surname>Evans</surname> <given-names>N</given-names></name> <name><surname>Kinnunen</surname> <given-names>T</given-names></name> <name><surname>Lee</surname> <given-names>KA</given-names></name> <name><surname>Liu</surname> <given-names>X</given-names></name><etal/></person-group> <comment>Benchmarking and challenges in security and privacy for voice biometrics</comment>. <comment>arXiv</comment>. (<year>2021</year>). <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="http://arxiv.org/abs/2109.00281">http://arxiv.org/abs/2109.00281</ext-link> <comment>(Accessed October 25, 2024)</comment>.</mixed-citation></ref>
<ref id="B46"><label>46.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>X</given-names></name> <name><surname>Li</surname> <given-names>Z</given-names></name> <name><surname>Setlur</surname> <given-names>S</given-names></name> <name><surname>Xu</surname> <given-names>W</given-names></name></person-group>. <article-title>Exploring racial and gender disparities in voice biometrics</article-title>. <source>Sci Rep</source>. (<year>2022</year>) <volume>12</volume>(<issue>1</issue>):<fpage>3723</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-022-06673-y</pub-id><pub-id pub-id-type="pmid">35260572</pub-id></mixed-citation></ref>
<ref id="B47"><label>47.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Leschanowsky</surname> <given-names>A</given-names></name> <name><surname>Rusti</surname> <given-names>C</given-names></name> <name><surname>Quinlan</surname> <given-names>C</given-names></name> <name><surname>Pnacek</surname> <given-names>M</given-names></name> <name><surname>Gorce</surname> <given-names>L</given-names></name> <name><surname>Hutiri</surname> <given-names>W</given-names></name></person-group>. <article-title>A data perspective on ethical challenges in voice biometrics research</article-title>. <source>IEEE Trans Biom Behav Identity Sci</source>. (<year>2024</year>):<fpage>1</fpage>.</mixed-citation></ref>
<ref id="B48"><label>48.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wells</surname> <given-names>A</given-names></name> <name><surname>Usman</surname> <given-names>AB</given-names></name></person-group>. <article-title>Trust and voice biometrics authentication for internet of things</article-title>. <source>Int J Inf Secur Priv IJISP</source>. (<year>2023</year>) <volume>17</volume>(<issue>1</issue>):<fpage>1</fpage>&#x2013;<lpage>28</lpage>.</mixed-citation></ref>
<ref id="B49"><label>49.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>B&#x00E9;lisle-Pipon</surname> <given-names>JC</given-names></name> <name><surname>Powell</surname> <given-names>M</given-names></name> <name><surname>English</surname> <given-names>R</given-names></name> <name><surname>Malo</surname> <given-names>MF</given-names></name> <name><surname>Ravitsky</surname> <given-names>V</given-names></name> <name><surname>Bensoussan</surname> <given-names>Y</given-names></name></person-group>. <article-title>Stakeholder perspectives on ethical and trustworthy voice AI in health care</article-title>. <source>Digit Health</source>. (<year>2024</year>) <volume>10</volume>:<fpage>20552076241260407</fpage>. <pub-id pub-id-type="doi">10.1177/20552076241260407</pub-id></mixed-citation></ref>
<ref id="B50"><label>50.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Andreoletti</surname> <given-names>M</given-names></name> <name><surname>Haller</surname> <given-names>L</given-names></name> <name><surname>Vayena</surname> <given-names>E</given-names></name> <name><surname>Blasimme</surname> <given-names>A</given-names></name></person-group>. <article-title>Mapping the ethical landscape of digital biomarkers: a scoping review</article-title>. <source>PLoS Digit Health</source>. (<year>2024</year>) <volume>3</volume>(<issue>5</issue>):<fpage>e0000519</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pdig.0000519</pub-id><pub-id pub-id-type="pmid">38753605</pub-id></mixed-citation></ref>
<ref id="B51"><label>51.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ford</surname> <given-names>E</given-names></name> <name><surname>Milne</surname> <given-names>R</given-names></name> <name><surname>Curlewis</surname> <given-names>K</given-names></name></person-group>. <article-title>Ethical issues when using digital biomarkers and artificial intelligence for the early detection of dementia</article-title>. <source>Wiley Interdiscip Rev Data Min Knowl Discov</source>. (<year>2023</year>) <volume>13</volume>(<issue>3</issue>):<fpage>e1492</fpage>. <pub-id pub-id-type="doi">10.1002/widm.1492</pub-id><pub-id pub-id-type="pmid">38439952</pub-id></mixed-citation></ref>
<ref id="B52"><label>52.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Subbian</surname> <given-names>V</given-names></name> <name><surname>Galvin</surname> <given-names>HK</given-names></name> <name><surname>Petersen</surname> <given-names>C</given-names></name> <name><surname>Solomonides</surname> <given-names>A</given-names></name></person-group>. <article-title>Ethical, legal, and social issues (ELSI) in mental health informatics</article-title>. <source>Ment Health Inform Enabling Learn Ment Healthc Syst</source>. (<year>2021</year>):<fpage>479</fpage>&#x2013;<lpage>503</lpage>.</mixed-citation></ref>
<ref id="B53"><label>53.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wright</surname> <given-names>JM</given-names></name> <name><surname>Regele</surname> <given-names>OB</given-names></name> <name><surname>Kourtis</surname> <given-names>LC</given-names></name> <name><surname>Pszenny</surname> <given-names>SM</given-names></name> <name><surname>Sirkar</surname> <given-names>R</given-names></name> <name><surname>Kovalchick</surname> <given-names>C</given-names></name><etal/></person-group> <article-title>Evolution of the digital biomarker ecosystem</article-title>. <source>Digit Med</source>. (<year>2017</year>) <volume>3</volume>(<issue>4</issue>):<fpage>154</fpage>. <pub-id pub-id-type="doi">10.4103/digm.digm_35_17</pub-id></mixed-citation></ref>
<ref id="B54"><label>54.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Armstrong</surname> <given-names>R</given-names></name> <name><surname>Hall</surname> <given-names>BJ</given-names></name> <name><surname>Waters</surname> <given-names>DJ</given-names></name></person-group>, <collab>Cochrane Update E</collab>. <article-title>&#x201C;Scoping the scope&#x201D; of a Cochrane review</article-title>. <source>J Public Health Oxf Engl</source>. (<year>2011</year>) <volume>33</volume>(<issue>1</issue>):<fpage>147</fpage>&#x2013;<lpage>50</lpage>. <pub-id pub-id-type="doi">10.1093/pubmed/fdr015</pub-id></mixed-citation></ref>
<ref id="B55"><label>55.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Munn</surname> <given-names>Z</given-names></name> <name><surname>Peters</surname> <given-names>MDJ</given-names></name> <name><surname>Stern</surname> <given-names>C</given-names></name> <name><surname>Tufanaru</surname> <given-names>C</given-names></name> <name><surname>McArthur</surname> <given-names>A</given-names></name> <name><surname>Aromataris</surname> <given-names>E</given-names></name></person-group>. <article-title>Systematic review or scoping review? Guidance for authors when choosing between a systematic or scoping review approach</article-title>. <source>BMC Med Res Methodol</source>. (<year>2018</year>) <volume>18</volume>(<issue>1</issue>):<fpage>143</fpage>. <pub-id pub-id-type="doi">10.1186/s12874-018-0611-x</pub-id><pub-id pub-id-type="pmid">30453902</pub-id></mixed-citation></ref>
<ref id="B56"><label>56.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Peters</surname> <given-names>MDJ</given-names></name> <name><surname>Godfrey</surname> <given-names>C</given-names></name> <name><surname>McInerney</surname> <given-names>P</given-names></name> <name><surname>Khalil</surname> <given-names>H</given-names></name> <name><surname>Larsen</surname> <given-names>P</given-names></name> <name><surname>Marnie</surname> <given-names>C</given-names></name><etal/></person-group> <article-title>Best practice guidance and reporting items for the development of scoping review protocols</article-title>. <source>JBI Evid Synth</source>. (<year>2022</year>) <volume>20</volume>(<issue>4</issue>):<fpage>953</fpage>. <pub-id pub-id-type="doi">10.11124/JBIES-21-00242</pub-id><pub-id pub-id-type="pmid">35102103</pub-id></mixed-citation></ref>
<ref id="B57"><label>57.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pham</surname> <given-names>MT</given-names></name> <name><surname>Raji&#x0107;</surname> <given-names>A</given-names></name> <name><surname>Greig</surname> <given-names>JD</given-names></name> <name><surname>Sargeant</surname> <given-names>JM</given-names></name> <name><surname>Papadopoulos</surname> <given-names>A</given-names></name> <name><surname>McEwen</surname> <given-names>SA</given-names></name></person-group>. <article-title>A scoping review of scoping reviews: advancing the approach and enhancing the consistency</article-title>. <source>Res Synth Methods</source>. (<year>2014</year>) <volume>5</volume>(<issue>4</issue>):<fpage>371</fpage>&#x2013;<lpage>85</lpage>. <pub-id pub-id-type="doi">10.1002/jrsm.1123</pub-id><pub-id pub-id-type="pmid">26052958</pub-id></mixed-citation></ref>
<ref id="B58"><label>58.</label><mixed-citation publication-type="other"><collab>Veritas Health Innovation</collab>. <comment>Covidence</comment>. (<year>2014</year>). <comment>Covidence Systematic Review Software</comment>. <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://www.covidence.org/">https://www.covidence.org/</ext-link> <comment>(Accessed December 14, 2022)</comment>.</mixed-citation></ref>
<ref id="B59"><label>59.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hawker</surname> <given-names>S</given-names></name> <name><surname>Payne</surname> <given-names>S</given-names></name> <name><surname>Kerr</surname> <given-names>C</given-names></name> <name><surname>Hardey</surname> <given-names>M</given-names></name> <name><surname>Powell</surname> <given-names>J</given-names></name></person-group>. <article-title>Appraising the evidence: reviewing disparate data systematically</article-title>. <source>Qual Health Res</source>. (<year>2002</year>) <volume>12</volume>(<issue>9</issue>):<fpage>1284</fpage>&#x2013;<lpage>99</lpage>. <pub-id pub-id-type="doi">10.1177/1049732302238251</pub-id><pub-id pub-id-type="pmid">12448672</pub-id></mixed-citation></ref>
<ref id="B60"><label>60.</label><mixed-citation publication-type="other"><collab>Lumivero</collab>. <comment>NVivo (Version 14)</comment>. (<year>2023</year>). <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://www.lumivero.com">www.lumivero.com</ext-link> <comment>(Accessed March 27, 2025).</comment></mixed-citation></ref>
<ref id="B61"><label>61.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Neale</surname> <given-names>J</given-names></name></person-group>. <article-title>Iterative categorization (IC): a systematic technique for analysing qualitative data</article-title>. <source>Addiction</source>. (<year>2016</year>) <volume>111</volume>(<issue>6</issue>):<fpage>1096</fpage>&#x2013;<lpage>106</lpage>. <pub-id pub-id-type="doi">10.1111/add.13314</pub-id><pub-id pub-id-type="pmid">26806155</pub-id></mixed-citation></ref>
<ref id="B62"><label>62.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>McGowan</surname> <given-names>J</given-names></name> <name><surname>Straus</surname> <given-names>S</given-names></name> <name><surname>Moher</surname> <given-names>D</given-names></name> <name><surname>Langlois</surname> <given-names>EV</given-names></name> <name><surname>O&#x2019;Brien</surname> <given-names>KK</given-names></name> <name><surname>Horsley</surname> <given-names>T</given-names></name><etal/></person-group> <article-title>Reporting scoping reviews&#x2014;PRISMA ScR extension</article-title>. <source>J Clin Epidemiol</source>. (<year>2020</year>) <volume>123</volume>:<fpage>177</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1016/j.jclinepi.2020.03.016</pub-id><pub-id pub-id-type="pmid">32229248</pub-id></mixed-citation></ref>
<ref id="B63"><label>63.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jeong</surname> <given-names>H</given-names></name> <name><surname>Jeong</surname> <given-names>YW</given-names></name> <name><surname>Park</surname> <given-names>Y</given-names></name> <name><surname>Kim</surname> <given-names>K</given-names></name> <name><surname>Park</surname> <given-names>J</given-names></name> <name><surname>Kang</surname> <given-names>DR</given-names></name></person-group>. <article-title>Applications of deep learning methods in digital biomarker research using noninvasive sensing data</article-title>. <source>Digit Health</source>. (<year>2022</year>) <volume>8</volume>:<fpage>20552076221136642</fpage>. <pub-id pub-id-type="doi">10.1177/20552076221136642</pub-id><pub-id pub-id-type="pmid">36353696</pub-id></mixed-citation></ref>
<ref id="B64"><label>64.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ross</surname> <given-names>A</given-names></name> <name><surname>Banerjee</surname> <given-names>S</given-names></name> <name><surname>Chowdhury</surname> <given-names>A</given-names></name></person-group>. <article-title>Deducing health cues from biometric data</article-title>. <source>Comput Vis Image Underst</source>. (<year>2022</year>) <volume>221</volume>:<fpage>103438</fpage>. <pub-id pub-id-type="doi">10.1016/j.cviu.2022.103438</pub-id></mixed-citation></ref>
<ref id="B65"><label>65.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ganesh</surname> <given-names>S</given-names></name> <name><surname>Chithambaram</surname> <given-names>T</given-names></name> <name><surname>Krishnan</surname> <given-names>NR</given-names></name> <name><surname>Vincent</surname> <given-names>DR</given-names></name> <name><surname>Kaliappan</surname> <given-names>J</given-names></name> <name><surname>Srinivasan</surname> <given-names>K</given-names></name></person-group>. <article-title>Exploring Huntington&#x2019;s disease diagnosis via artificial intelligence models: a comprehensive review</article-title>. <source>Diagnostics</source>. (<year>2023</year>) <volume>13</volume>(<issue>23</issue>):<fpage>3592</fpage>. <pub-id pub-id-type="doi">10.3390/diagnostics13233592</pub-id><pub-id pub-id-type="pmid">38066833</pub-id></mixed-citation></ref>
<ref id="B66"><label>66.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Genaro Motti</surname> <given-names>V</given-names></name></person-group>. <article-title>Towards a design space for emotion recognition</article-title>. <conf-name>Adjun Proc 2021 ACM Int Jt Conf Pervasive Ubiquitous Comput Proc 2021 ACM Int Symp Wearable Comput</conf-name> (<year>2021</year>). p. <fpage>243</fpage>&#x2013;<lpage>7</lpage></mixed-citation></ref>
<ref id="B67"><label>67.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Miley</surname> <given-names>EV</given-names></name> <name><surname>Schaeffler</surname> <given-names>F</given-names></name> <name><surname>Beck</surname> <given-names>J</given-names></name> <name><surname>Eichner</surname> <given-names>M</given-names></name> <name><surname>Jannetts</surname> <given-names>S</given-names></name></person-group>. <article-title>Secure account-based data capture with smartphones&#x2014;preliminary results from a study of articulatory precision in clinical depression</article-title>. <source>Linguist Vanguard</source>. (<year>2021</year>) <volume>7</volume>:<fpage>20190015</fpage>. <pub-id pub-id-type="doi">10.1515/lingvan-2019-0015</pub-id></mixed-citation></ref>
<ref id="B68"><label>68.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ortiz-Clavijo</surname> <given-names>LF</given-names></name> <name><surname>Gallego-Duque</surname> <given-names>CJ</given-names></name> <name><surname>David-Diaz</surname> <given-names>JC</given-names></name> <name><surname>Ortiz-Zamora</surname> <given-names>AF</given-names></name></person-group>. <article-title>Implications of emotion recognition technologies: balancing privacy and public safety</article-title>. <source>IEEE Technol Soc Mag</source>. (<year>2023</year>) <volume>42</volume>(<issue>3</issue>):<fpage>69</fpage>&#x2013;<lpage>75</lpage>. <pub-id pub-id-type="doi">10.1109/MTS.2023.3306530</pub-id></mixed-citation></ref>
<ref id="B69"><label>69.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Harris</surname> <given-names>C</given-names></name> <name><surname>Tang</surname> <given-names>Y</given-names></name> <name><surname>Birnbaum</surname> <given-names>E</given-names></name> <name><surname>Cherian</surname> <given-names>C</given-names></name> <name><surname>Mendhe</surname> <given-names>D</given-names></name> <name><surname>Chen</surname> <given-names>MH</given-names></name></person-group>. <article-title>Digital neuropsychology beyond computerized cognitive assessment: applications of novel digital technologies</article-title>. <source>Arch Clin Neuropsychol</source>. (<year>2024</year>) <volume>39</volume>(<issue>3</issue>):<fpage>290</fpage>&#x2013;<lpage>304</lpage>. <pub-id pub-id-type="doi">10.1093/arclin/acae016</pub-id><pub-id pub-id-type="pmid">38520381</pub-id></mixed-citation></ref>
<ref id="B70"><label>70.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Carter</surname> <given-names>A</given-names></name> <name><surname>Liddle</surname> <given-names>J</given-names></name> <name><surname>Hall</surname> <given-names>W</given-names></name> <name><surname>Chenery</surname> <given-names>H</given-names></name></person-group>. <article-title>Mobile phones in research and treatment: ethical guidelines and future directions</article-title>. <source>JMIR MHealth UHealth</source>. (<year>2015</year>) <volume>3</volume>(<issue>4</issue>):<fpage>e4538</fpage>. <pub-id pub-id-type="doi">10.2196/mhealth.4538</pub-id></mixed-citation></ref>
<ref id="B71"><label>71.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Nautsch</surname> <given-names>A</given-names></name> <name><surname>Jasserand</surname> <given-names>C</given-names></name> <name><surname>Kindt</surname> <given-names>E</given-names></name> <name><surname>Todisco</surname> <given-names>M</given-names></name> <name><surname>Trancoso</surname> <given-names>I</given-names></name> <name><surname>Evans</surname> <given-names>N.</given-names></name></person-group> <comment>The GDPR &#x0026; speech data: Reflections of legal and technology communities, first steps towards a common understanding</comment>. <comment>ArXiv Prepr ArXiv190703458</comment>. (<year>2019</year>).</mixed-citation></ref>
<ref id="B72"><label>72.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bauer</surname> <given-names>M</given-names></name> <name><surname>Glenn</surname> <given-names>T</given-names></name> <name><surname>Monteith</surname> <given-names>S</given-names></name> <name><surname>Bauer</surname> <given-names>R</given-names></name> <name><surname>Whybrow</surname> <given-names>PC</given-names></name> <name><surname>Geddes</surname> <given-names>J</given-names></name></person-group>. <article-title>Ethical perspectives on recommending digital technology for patients with mental illness</article-title>. <source>Int J Bipolar Disord</source>. (<year>2017</year>) <volume>5</volume>(<issue>1</issue>):<fpage>6</fpage>. <pub-id pub-id-type="doi">10.1186/s40345-017-0073-9</pub-id><pub-id pub-id-type="pmid">28155206</pub-id></mixed-citation></ref>
<ref id="B73"><label>73.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lane</surname> <given-names>ND</given-names></name> <name><surname>Miluzzo</surname> <given-names>E</given-names></name> <name><surname>Lu</surname> <given-names>H</given-names></name> <name><surname>Peebles</surname> <given-names>D</given-names></name> <name><surname>Choudhury</surname> <given-names>T</given-names></name> <name><surname>Campbell</surname> <given-names>AT</given-names></name></person-group>. <article-title>A survey of mobile phone sensing</article-title>. <source>IEEE Commun Mag</source>. (<year>2010</year>) <volume>48</volume>(<issue>9</issue>):<fpage>140</fpage>&#x2013;<lpage>50</lpage>. <pub-id pub-id-type="doi">10.1109/MCOM.2010.5560598</pub-id></mixed-citation></ref>
<ref id="B74"><label>74.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nautsch</surname> <given-names>A</given-names></name> <name><surname>Jim&#x00E9;nez</surname> <given-names>A</given-names></name> <name><surname>Treiber</surname> <given-names>A</given-names></name> <name><surname>Kolberg</surname> <given-names>J</given-names></name> <name><surname>Jasserand</surname> <given-names>C</given-names></name> <name><surname>Kindt</surname> <given-names>E</given-names></name><etal/></person-group> <article-title>Preserving privacy in speaker and speech characterisation</article-title>. <source>Comput Speech Lang</source>. (<year>2019</year>) <volume>58</volume>:<fpage>441</fpage>&#x2013;<lpage>80</lpage>. <pub-id pub-id-type="doi">10.1016/j.csl.2019.06.001</pub-id></mixed-citation></ref>
<ref id="B75"><label>75.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Petti</surname> <given-names>U</given-names></name> <name><surname>Nyrup</surname> <given-names>R</given-names></name> <name><surname>Skopek</surname> <given-names>JM</given-names></name> <name><surname>Korhonen</surname> <given-names>A.</given-names></name></person-group> <article-title>Ethical considerations in the early detection of Alzheimer&#x2019;s disease using speech and AI</article-title>. In: <conf-name>Proceedings of the 2023 ACM Conference on Fairness, Accountability, and Transparency</conf-name>. <publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name> (<year>2023</year>). p. <fpage>1062</fpage>&#x2013;<lpage>75</lpage>. <comment>(FAccT &#x2018;23)</comment>. <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://dl.acm.org/doi/10.1145/3593013.3594063">https://dl.acm.org/doi/10.1145/3593013.3594063</ext-link> <comment>(Accessed May 16, 2024)</comment>.</mixed-citation></ref>
<ref id="B76"><label>76.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hurley</surname> <given-names>ME</given-names></name> <name><surname>Sonig</surname> <given-names>A</given-names></name> <name><surname>Herrington</surname> <given-names>J</given-names></name> <name><surname>Storch</surname> <given-names>EA</given-names></name> <name><surname>L&#x00E1;zaro-Mu&#x00F1;oz</surname> <given-names>G</given-names></name> <name><surname>Blumenthal-Barby</surname> <given-names>J</given-names></name><etal/></person-group> <article-title>Ethical considerations for integrating multimodal computer perception and neurotechnology</article-title>. <source>Front Hum Neurosci</source>. (<year>2024</year>) <volume>18</volume>:<fpage>1332451</fpage>. <pub-id pub-id-type="doi">10.3389/fnhum.2024.1332451</pub-id><pub-id pub-id-type="pmid">38435745</pub-id></mixed-citation></ref>
<ref id="B77"><label>77.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Koops</surname> <given-names>S</given-names></name> <name><surname>Brederoo</surname> <given-names>SG</given-names></name> <name><surname>de Boer</surname> <given-names>JN</given-names></name> <name><surname>Nadema</surname> <given-names>FG</given-names></name> <name><surname>Voppel</surname> <given-names>AE</given-names></name> <name><surname>Sommer</surname> <given-names>IE</given-names></name></person-group>. <article-title>Speech as a biomarker for depression</article-title>. <source>CNS Neurol Disord Drug Targets</source>. (<year>2023</year>) <volume>22</volume>(<issue>2</issue>):<fpage>152</fpage>&#x2013;<lpage>60</lpage>. <pub-id pub-id-type="doi">10.2174/1871527320666211213125847</pub-id><pub-id pub-id-type="pmid">34961469</pub-id></mixed-citation></ref>
<ref id="B78"><label>78.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pentland</surname> <given-names>A</given-names></name> <name><surname>Lazer</surname> <given-names>D</given-names></name> <name><surname>Brewer</surname> <given-names>D</given-names></name> <name><surname>Heibeck</surname> <given-names>T</given-names></name></person-group>. <article-title>Using reality mining to improve public health and medicine</article-title>. <source>Stud Health Technol Inform</source>. (<year>2009</year>) <volume>149</volume>(<issue>ck1, 9214582</issue>):<fpage>93</fpage>&#x2013;<lpage>102</lpage>.<pub-id pub-id-type="pmid">19745474</pub-id></mixed-citation></ref>
<ref id="B79"><label>79.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Slavich</surname> <given-names>GM</given-names></name> <name><surname>Taylor</surname> <given-names>S</given-names></name> <name><surname>Picard</surname> <given-names>RW</given-names></name></person-group>. <article-title>Stress measurement using speech: recent advancements, validation issues, and ethical and privacy considerations</article-title>. <source>Stress Int J Biol Stress</source>. (<year>2019</year>) <volume>22</volume>(<issue>4</issue>):<fpage>408</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1080/10253890.2019.1584180</pub-id></mixed-citation></ref>
<ref id="B80"><label>80.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bavli</surname> <given-names>I</given-names></name> <name><surname>Ho</surname> <given-names>A</given-names></name> <name><surname>Mahal</surname> <given-names>R</given-names></name> <name><surname>McKeown</surname> <given-names>MJ</given-names></name></person-group>. <article-title>Ethical concerns around privacy and data security in AI health monitoring for Parkinson&#x2019;s disease: insights from patients, family members, and healthcare professionals</article-title>. <source>AI Soc</source>. (<year>2024</year>) <volume>40</volume>:<fpage>155</fpage>&#x2013;<lpage>65</lpage>. <pub-id pub-id-type="doi">10.1007/s00146-023-01843-6</pub-id></mixed-citation></ref>
<ref id="B81"><label>81.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gold</surname> <given-names>M</given-names></name> <name><surname>Amatniek</surname> <given-names>J</given-names></name> <name><surname>Carrillo</surname> <given-names>MC</given-names></name> <name><surname>Cedarbaum</surname> <given-names>JM</given-names></name> <name><surname>Hendrix</surname> <given-names>JA</given-names></name> <name><surname>Miller</surname> <given-names>BB</given-names></name><etal/></person-group> <article-title>Digital technologies as biomarkers, clinical outcomes assessment, and recruitment tools in Alzheimer&#x2019;s disease clinical trials</article-title>. <source>Alzheimers Dement Transl Res Clin Interv</source>. (<year>2018</year>) <volume>4</volume>:<fpage>234</fpage>&#x2013;<lpage>42</lpage>. <pub-id pub-id-type="doi">10.1016/j.trci.2018.04.003</pub-id></mixed-citation></ref>
<ref id="B82"><label>82.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Popp</surname> <given-names>Z</given-names></name> <name><surname>Low</surname> <given-names>S</given-names></name> <name><surname>Igwe</surname> <given-names>A</given-names></name> <name><surname>Rahman</surname> <given-names>MS</given-names></name> <name><surname>Kim</surname> <given-names>M</given-names></name> <name><surname>Khan</surname> <given-names>R</given-names></name><etal/></person-group> <article-title>Shifting from active to passive monitoring of Alzheimer disease: the state of the research</article-title>. <source>J Am Heart Assoc</source>. (<year>2024</year>) <volume>13</volume>(<issue>2</issue>):<fpage>e031247</fpage>. <pub-id pub-id-type="doi">10.1161/JAHA.123.031247</pub-id><pub-id pub-id-type="pmid">38226518</pub-id></mixed-citation></ref>
<ref id="B83"><label>83.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bailey</surname> <given-names>JO</given-names></name> <name><surname>Patel</surname> <given-names>B</given-names></name> <name><surname>Gurari</surname> <given-names>D</given-names></name></person-group>. <article-title>A perspective on building ethical datasets for Children&#x2019;s conversational agents</article-title>. <source>Front Artif Intell</source>. (<year>2021</year>) <volume>4</volume>:<fpage>637532</fpage>. <pub-id pub-id-type="doi">10.3389/frai.2021.637532</pub-id><pub-id pub-id-type="pmid">34056578</pub-id></mixed-citation></ref>
<ref id="B84"><label>84.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Du</surname> <given-names>Y</given-names></name> <name><surname>Lubniewski</surname> <given-names>K</given-names></name> <name><surname>Price</surname> <given-names>L</given-names></name> <name><surname>Breslin</surname> <given-names>G</given-names></name> <name><surname>Thomson</surname> <given-names>P</given-names></name> <name><surname>Jinadasa</surname> <given-names>N</given-names></name><etal/></person-group> <article-title>They Can&#x2019;t believe they&#x2019;re a tiger&#x201D;: insights from pediatric speech-language pathologist mobile app users and app designers</article-title>. <source>Int J Lang Commun Disord</source>. (<year>2023</year>) <volume>58</volume>(<issue>5</issue>):<fpage>1717</fpage>&#x2013;<lpage>37</lpage>. <pub-id pub-id-type="doi">10.1111/1460-6984.12898</pub-id><pub-id pub-id-type="pmid">37219400</pub-id></mixed-citation></ref>
<ref id="B85"><label>85.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Latif</surname> <given-names>S</given-names></name> <name><surname>Ali</surname> <given-names>HS</given-names></name> <name><surname>Usama</surname> <given-names>M</given-names></name> <name><surname>Rana</surname> <given-names>R</given-names></name> <name><surname>Schuller</surname> <given-names>B</given-names></name> <name><surname>Qadir</surname> <given-names>J</given-names></name></person-group>. <comment>AI-Based Emotion Recognition: Promise, Peril, and Prescriptions for Prosocial Path</comment>. <comment>ArXiv Prepr ArXiv221107290</comment>. (<year>2022</year>).</mixed-citation></ref>
<ref id="B86"><label>86.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Garnerin</surname> <given-names>M</given-names></name> <name><surname>Rossato</surname> <given-names>S</given-names></name> <name><surname>Besacier</surname> <given-names>L.</given-names></name></person-group> <comment>Gender Representation in Open Source Speech Resources</comment>. (<year>2020</year>). p. <fpage>6599</fpage>&#x2013;<lpage>605</lpage>.</mixed-citation></ref>
<ref id="B87"><label>87.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fraser</surname> <given-names>KC</given-names></name> <name><surname>Linz</surname> <given-names>N</given-names></name> <name><surname>Lindsay</surname> <given-names>H</given-names></name> <name><surname>K&#x00F6;nig</surname> <given-names>A</given-names></name></person-group>. <article-title>The importance of sharing patient-generated clinical speech and language data</article-title>. <source>CLPsyc 2019-Sixth Workshop Comput Linguist Clin Psychol</source>. (<year>2019</year>):<fpage>55</fpage>&#x2013;<lpage>61</lpage>. <pub-id pub-id-type="doi">10.18653/v1/W19-3007</pub-id></mixed-citation></ref>
<ref id="B88"><label>88.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Casillas</surname> <given-names>M</given-names></name> <name><surname>Cristia</surname> <given-names>A</given-names></name></person-group>. <article-title>A step-by-step guide to collecting and analyzing long-format speech environment (LFSE) recordings</article-title>. <source>Collabra Psychol</source>. (<year>2019</year>) <volume>5</volume>(<issue>1</issue>):<fpage>24</fpage>. <pub-id pub-id-type="doi">10.1525/collabra.209</pub-id></mixed-citation></ref>
<ref id="B89"><label>89.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Low</surname> <given-names>DM</given-names></name> <name><surname>Bentley</surname> <given-names>KH</given-names></name> <name><surname>Ghosh</surname> <given-names>SS</given-names></name></person-group>. <article-title>Automated assessment of psychiatric disorders using speech: a systematic review</article-title>. <source>Laryngoscope Investig Otolaryngol</source>. (<year>2020</year>) <volume>5</volume>(<issue>1</issue>):<fpage>96</fpage>&#x2013;<lpage>116</lpage>. <pub-id pub-id-type="doi">10.1002/lio2.354</pub-id><pub-id pub-id-type="pmid">32128436</pub-id></mixed-citation></ref>
<ref id="B90"><label>90.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>A</given-names></name> <name><surname>Bessell</surname> <given-names>N</given-names></name> <name><surname>van den Heuvel</surname> <given-names>H</given-names></name> <name><surname>Saalasti</surname> <given-names>S</given-names></name> <name><surname>Klessa</surname> <given-names>K</given-names></name> <name><surname>Muller</surname> <given-names>N</given-names></name><etal/></person-group> <article-title>The latest development of the DELAD project for sharing corpora of speech disorders</article-title>. <source>Clin Linguist Phon</source>. (<year>2022</year>) <volume>36</volume>(<issue>2&#x2013;3</issue>):<fpage>102</fpage>&#x2013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1080/02699206.2021.1913514</pub-id><pub-id pub-id-type="pmid">33890543</pub-id></mixed-citation></ref>
<ref id="B91"><label>91.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Kr&#x00F6;ger</surname> <given-names>JL</given-names></name> <name><surname>Lutz</surname> <given-names>OHM</given-names></name> <name><surname>Raschke</surname> <given-names>P.</given-names></name></person-group> <comment>Privacy implications of voice and speech analysis&#x2013;information disclosure by inference. Priv Identity Manag Data Better Living AI Priv 14th IFIP WG 92 96117 116SIG 92 2 Int Summer Sch Wind Switz August 19&#x2013;23 2019 Revis Sel Pap 14</comment>. (<year>2020)</year>. <fpage>242</fpage>&#x2013;<lpage>58</lpage>.</mixed-citation></ref>
<ref id="B92"><label>92.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Batliner</surname> <given-names>A</given-names></name> <name><surname>Hantke</surname> <given-names>S</given-names></name> <name><surname>Schuller</surname> <given-names>B</given-names></name></person-group>. <article-title>Ethics and good practice in computational paralinguistics</article-title>. <source>IEEE Trans Affect Comput</source>. (<year>2022</year>) <volume>13</volume>(<issue>3</issue>):<fpage>1236</fpage>&#x2013;<lpage>53</lpage>. <pub-id pub-id-type="doi">10.1109/TAFFC.2020.3021015</pub-id></mixed-citation></ref>
<ref id="B93"><label>93.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dikaios</surname> <given-names>K</given-names></name> <name><surname>Rempel</surname> <given-names>S</given-names></name> <name><surname>Dumpala</surname> <given-names>SH</given-names></name> <name><surname>Oore</surname> <given-names>S</given-names></name> <name><surname>Kiefte</surname> <given-names>M</given-names></name> <name><surname>Uher</surname> <given-names>R</given-names></name></person-group>. <article-title>Applications of speech analysis in psychiatry</article-title>. <source>Harv Rev Psychiatry</source>. (<year>2023</year>) <volume>31</volume>(<issue>1</issue>):<fpage>1</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1097/HRP.0000000000000356</pub-id><pub-id pub-id-type="pmid">36608078</pub-id></mixed-citation></ref>
<ref id="B94"><label>94.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bettis</surname> <given-names>AH</given-names></name> <name><surname>Burke</surname> <given-names>TA</given-names></name> <name><surname>Nesi</surname> <given-names>J</given-names></name> <name><surname>Liu</surname> <given-names>RT</given-names></name></person-group>. <article-title>Digital technologies for emotion-regulation assessment and intervention: a conceptual review</article-title>. <source>Clin Psychol Sci</source>. (<year>2022</year>) <volume>10</volume>(<issue>1</issue>):<fpage>3</fpage>&#x2013;<lpage>26</lpage>. <pub-id pub-id-type="doi">10.1177/21677026211011982</pub-id><pub-id pub-id-type="pmid">35174006</pub-id></mixed-citation></ref>
<ref id="B95"><label>95.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liddle</surname> <given-names>J</given-names></name> <name><surname>Burdon</surname> <given-names>M</given-names></name> <name><surname>Ireland</surname> <given-names>D</given-names></name> <name><surname>Carter</surname> <given-names>A</given-names></name> <name><surname>Knuepffer</surname> <given-names>C</given-names></name> <name><surname>Milevskiy</surname> <given-names>N</given-names></name><etal/></person-group> <article-title>Balancing self-tracking and surveillance: legal, ethical and technological issues in using smartphones to monitor communication in people with health conditions</article-title>. <source>J Law Med</source>. (<year>2016</year>) <volume>24</volume>(<issue>2</issue>):<fpage>387</fpage>&#x2013;<lpage>97</lpage>.<pub-id pub-id-type="pmid">30137711</pub-id></mixed-citation></ref>
<ref id="B96"><label>96.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rashidisabet</surname> <given-names>H</given-names></name> <name><surname>Thomas</surname> <given-names>PJ</given-names></name> <name><surname>Ajilore</surname> <given-names>O</given-names></name> <name><surname>Zulueta</surname> <given-names>J</given-names></name> <name><surname>Moore</surname> <given-names>RC</given-names></name> <name><surname>Leow</surname> <given-names>A</given-names></name></person-group>. <article-title>A systems biology approach to the digital behaviorome</article-title>. <source>Curr Opin Syst Biol</source>. (<year>2020</year>) <volume>20</volume>:<fpage>8</fpage>&#x2013;<lpage>16</lpage>. <pub-id pub-id-type="doi">10.1016/j.coisb.2020.07.003</pub-id></mixed-citation></ref>
<ref id="B97"><label>97.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hilty</surname> <given-names>DM</given-names></name> <name><surname>Armstrong</surname> <given-names>CM</given-names></name> <name><surname>Luxton</surname> <given-names>DD</given-names></name> <name><surname>Gentry</surname> <given-names>MT</given-names></name> <name><surname>Krupinski</surname> <given-names>EA</given-names></name></person-group>. <article-title>A scoping review of sensors, wearables, and remote monitoring for behavioral health: uses, outcomes, clinical competencies, and research directions</article-title>. <source>J Technol Behav Sci</source>. (<year>2021</year>) <volume>6</volume>:<fpage>278</fpage>&#x2013;<lpage>313</lpage>. <pub-id pub-id-type="doi">10.1007/s41347-021-00199-2</pub-id></mixed-citation></ref>
<ref id="B98"><label>98.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>CC</given-names></name> <name><surname>Chaspari</surname> <given-names>T</given-names></name> <name><surname>Provost</surname> <given-names>EM</given-names></name> <name><surname>Narayanan</surname> <given-names>SS</given-names></name></person-group>. <article-title>An engineering view on emotions and speech: from analysis and predictive models to responsible human-centered applications &#x007C; IEEE journals &#x0026; magazine &#x007C; IEEE Xplore</article-title>. <source>Proc IEEE</source>. (<year>2023</year>) <volume>111</volume>(<issue>10</issue>):<fpage>1142</fpage>&#x2013;<lpage>58</lpage>.</mixed-citation></ref>
<ref id="B99"><label>99.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Teepe</surname> <given-names>GW</given-names></name> <name><surname>Lukic</surname> <given-names>YX</given-names></name> <name><surname>Kleim</surname> <given-names>B</given-names></name> <name><surname>Jacobson</surname> <given-names>NC</given-names></name> <name><surname>Schneider</surname> <given-names>F</given-names></name> <name><surname>Santhanam</surname> <given-names>P</given-names></name><etal/></person-group> <article-title>Development of a digital biomarker and intervention for subclinical depression: study protocol for a longitudinal waitlist control study</article-title>. <source>BMC Psychol</source>. (<year>2023</year>) <volume>11</volume>(<issue>1</issue>):<fpage>186</fpage>. <pub-id pub-id-type="doi">10.1186/s40359-023-01215-1</pub-id><pub-id pub-id-type="pmid">37349832</pub-id></mixed-citation></ref>
<ref id="B100"><label>100.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cychosz</surname> <given-names>M</given-names></name> <name><surname>Romeo</surname> <given-names>R</given-names></name> <name><surname>Soderstrom</surname> <given-names>M</given-names></name> <name><surname>Scaff</surname> <given-names>C</given-names></name> <name><surname>Ganek</surname> <given-names>H</given-names></name> <name><surname>Cristia</surname> <given-names>A</given-names></name><etal/></person-group> <article-title>Longform recordings of everyday life: ethics for best practices</article-title>. <source>Behav Res Methods</source>. (<year>2020</year>) <volume>52</volume>:<fpage>1951</fpage>&#x2013;<lpage>69</lpage>. <pub-id pub-id-type="doi">10.3758/s13428-020-01365-9</pub-id><pub-id pub-id-type="pmid">32103465</pub-id></mixed-citation></ref>
<ref id="B101"><label>101.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dakanalis</surname> <given-names>A</given-names></name> <name><surname>Wiederhold</surname> <given-names>BK</given-names></name> <name><surname>Riva</surname> <given-names>G</given-names></name></person-group>. <article-title>Artificial intelligence: a game-changer for mental health care</article-title>. <source>Cyberpsychology Behav Soc Netw</source>. (<year>2024</year>) <volume>27</volume>(<issue>2</issue>):<fpage>100</fpage>&#x2013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1089/cyber.2023.0723</pub-id></mixed-citation></ref>
<ref id="B102"><label>102.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Diaz-Asper</surname> <given-names>C</given-names></name> <name><surname>Hauglid</surname> <given-names>MK</given-names></name> <name><surname>Chandler</surname> <given-names>C</given-names></name> <name><surname>Cohen</surname> <given-names>AS</given-names></name> <name><surname>Foltz</surname> <given-names>PW</given-names></name> <name><surname>Elvev&#x00E5;g</surname> <given-names>B</given-names></name></person-group>. <article-title>A framework for language technologies in behavioral research and clinical applications: ethical challenges, implications, and solutions</article-title>. <source>Am Psychol</source>. (<year>2024</year>) <volume>79</volume>(<issue>1</issue>):<fpage>79</fpage>&#x2013;<lpage>91</lpage>. <pub-id pub-id-type="doi">10.1037/amp0001195</pub-id><pub-id pub-id-type="pmid">38236217</pub-id></mixed-citation></ref>
<ref id="B103"><label>103.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Smith</surname> <given-names>E</given-names></name> <name><surname>Storch</surname> <given-names>EA</given-names></name> <name><surname>Vahia</surname> <given-names>I</given-names></name> <name><surname>Wong</surname> <given-names>STC</given-names></name> <name><surname>Lavretsky</surname> <given-names>H</given-names></name> <name><surname>Cummings</surname> <given-names>JL</given-names></name><etal/></person-group> <article-title>Affective computing for late-life mood and cognitive disorders</article-title>. <source>Front Psychiatry</source>. (<year>2021</year>) <volume>12</volume>:<fpage>782183</fpage>. <pub-id pub-id-type="doi">10.3389/fpsyt.2021.782183</pub-id><pub-id pub-id-type="pmid">35002802</pub-id></mixed-citation></ref>
<ref id="B104"><label>104.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Whelan</surname> <given-names>R</given-names></name> <name><surname>Barbey</surname> <given-names>FM</given-names></name> <name><surname>Cominetti</surname> <given-names>MR</given-names></name> <name><surname>Gillan</surname> <given-names>CM</given-names></name> <name><surname>Rosicka</surname> <given-names>AM</given-names></name></person-group>. <article-title>Developments in scalable strategies for detecting early markers of cognitive decline</article-title>. <source>Transl Psychiatry</source>. (<year>2022</year>) <volume>12</volume>(<issue>1</issue>):<fpage>473</fpage>. <pub-id pub-id-type="doi">10.1038/s41398-022-02237-w</pub-id><pub-id pub-id-type="pmid">36351888</pub-id></mixed-citation></ref>
<ref id="B105"><label>105.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Milling</surname> <given-names>M</given-names></name> <name><surname>Pokorny</surname> <given-names>FB</given-names></name> <name><surname>Bartl-Pokorny</surname> <given-names>KD</given-names></name> <name><surname>Schuller</surname> <given-names>BW</given-names></name></person-group>. <article-title>Is speech the new blood? Recent progress in AI-based disease detection from audio in a nutshell</article-title>. <source>Front Digit Health</source>. (<year>2022</year>) <volume>4</volume>:<fpage>886615</fpage>. <pub-id pub-id-type="doi">10.3389/fdgth.2022.886615</pub-id><pub-id pub-id-type="pmid">35651538</pub-id></mixed-citation></ref>
<ref id="B106"><label>106.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nahar</surname> <given-names>JK</given-names></name> <name><surname>Lopez-Jimenez</surname> <given-names>F</given-names></name></person-group>. <article-title>Utilizing conversational artificial intelligence, voice, and phonocardiography analytics in heart failure care</article-title>. <source>Heart Fail Clin</source>. (<year>2022</year>) <volume>18</volume>(<issue>2</issue>):<fpage>311</fpage>&#x2013;<lpage>23</lpage>. <pub-id pub-id-type="doi">10.1016/j.hfc.2021.11.006</pub-id><pub-id pub-id-type="pmid">35341543</pub-id></mixed-citation></ref>
<ref id="B107"><label>107.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Papakyriakopoulos</surname> <given-names>O</given-names></name> <name><surname>Choi</surname> <given-names>ASG</given-names></name> <name><surname>Andrews</surname> <given-names>J</given-names></name> <name><surname>Bourke</surname> <given-names>R</given-names></name> <name><surname>Thong</surname> <given-names>W</given-names></name> <name><surname>Zhao</surname> <given-names>D</given-names></name><etal/></person-group> <article-title>Augmented datasheets for speech datasets and ethical decision-making</article-title>. <conf-name>2023 ACM Conference on Fairness, Accountability, and Transparency</conf-name> (<year>2023</year>). p. <fpage>881</fpage>&#x2013;<lpage>904</lpage>. <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="http://arxiv.org/abs/2305.04672">http://arxiv.org/abs/2305.04672</ext-link> <comment>(Accessed May 16, 2024)</comment>.</mixed-citation></ref>
<ref id="B108"><label>108.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Triantafyllopoulos</surname> <given-names>A</given-names></name> <name><surname>Kathan</surname> <given-names>A</given-names></name> <name><surname>Baird</surname> <given-names>A</given-names></name> <name><surname>Christ</surname> <given-names>L</given-names></name> <name><surname>Gebhard</surname> <given-names>A</given-names></name> <name><surname>Gerczuk</surname> <given-names>M</given-names></name><etal/></person-group> <comment>HEAR4Health: A blueprint for making computer audition a staple of modern healthcare</comment>. <comment>ArXiv Prepr ArXiv230110477</comment>. (<year>2023</year>).</mixed-citation></ref>
<ref id="B109"><label>109.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Frohlich</surname> <given-names>H</given-names></name> <name><surname>Bontridder</surname> <given-names>N</given-names></name> <name><surname>Petrovska-Delacreta</surname> <given-names>D</given-names></name> <name><surname>Glaab</surname> <given-names>E</given-names></name> <name><surname>Kluge</surname> <given-names>F</given-names></name> <name><surname>El Yacoubi</surname> <given-names>M</given-names></name><etal/></person-group> <article-title>Leveraging the potential of digital technology for better individualized treatment of Parkinson&#x2019;s disease</article-title>. <source>Front Neurol</source>. (<year>2022</year>) <volume>13</volume>:<fpage>788427</fpage>. <pub-id pub-id-type="doi">10.3389/fneur.2022.788427</pub-id><pub-id pub-id-type="pmid">35295840</pub-id></mixed-citation></ref>
<ref id="B110"><label>110.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Meltzer</surname> <given-names>JA</given-names></name></person-group>. <article-title>Towards early prediction of Alzheimer&#x2019;s disease through language samples</article-title>. <source>EClinicalMedicine</source>. (<year>2020</year>) <volume>29</volume>. <pub-id pub-id-type="doi">10.1016/j.eclinm.2020.100644</pub-id><pub-id pub-id-type="pmid">33294826</pub-id></mixed-citation></ref>
<ref id="B111"><label>111.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Adams</surname> <given-names>WR</given-names></name></person-group>. <article-title>High-accuracy detection of early Parkinson&#x2019;s disease using multiple characteristics of finger movement while typing</article-title>. <source>PLoS One</source>. (<year>2017</year>) <volume>12</volume>(<issue>11</issue>):<fpage>e0188226</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0188226</pub-id><pub-id pub-id-type="pmid">29190695</pub-id></mixed-citation></ref>
<ref id="B112"><label>112.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nelson</surname> <given-names>BW</given-names></name> <name><surname>Allen</surname> <given-names>NB</given-names></name></person-group>. <article-title>Extending the passive-sensing toolbox: using smart-home technology in psychological science</article-title>. <source>Perspect Psychol Sci</source>. (<year>2018</year>) <volume>13</volume>(<issue>6</issue>):<fpage>718</fpage>&#x2013;<lpage>33</lpage>. <pub-id pub-id-type="doi">10.1177/1745691618776008</pub-id><pub-id pub-id-type="pmid">30217132</pub-id></mixed-citation></ref>
<ref id="B113"><label>113.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Thomas</surname> <given-names>J</given-names></name> <name><surname>Arya</surname> <given-names>L</given-names></name> <name><surname>Hussain</surname> <given-names>M</given-names></name> <name><surname>Prasanna</surname> <given-names>SRM.</given-names></name></person-group> <article-title>Speech act theory and ethics of speech processing as distinct stages: the ethics of collecting, contextualizing and the releasing of (speech) data</article-title>. In: <conf-name>2023 IEEE International Symposium on Ethics in Engineering, Science, and Technology (ETHICS)</conf-name>. <publisher-loc>West Lafayette, IN, USA</publisher-loc>: <publisher-name>IEEE</publisher-name> (<year>2023</year>). p. <fpage>1</fpage>&#x2013;<lpage>10</lpage>. <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://ieeexplore.ieee.org/document/10154932/">https://ieeexplore.ieee.org/document/10154932/</ext-link> <comment>(Accessed May 16, 2024)</comment>.</mixed-citation></ref>
<ref id="B114"><label>114.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Corona Hern&#x00E1;ndez</surname> <given-names>H</given-names></name> <name><surname>Corcoran</surname> <given-names>C</given-names></name> <name><surname>Achim</surname> <given-names>AM</given-names></name> <name><surname>de Boer</surname> <given-names>JN</given-names></name> <name><surname>Boerma</surname> <given-names>T</given-names></name> <name><surname>Brederoo</surname> <given-names>SG</given-names></name><etal/></person-group> <article-title>Natural language processing markers for psychosis and other psychiatric disorders: emerging themes and research agenda from a cross-linguistic workshop</article-title>. <source>Schizophr Bull</source>. (<year>2023</year>) <volume>49</volume>(<issue>2</issue>):<fpage>S86</fpage>&#x2013;<lpage>92</lpage>. <pub-id pub-id-type="doi">10.1093/schbul/sbac215</pub-id></mixed-citation></ref>
<ref id="B115"><label>115.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Isernia</surname> <given-names>S</given-names></name> <name><surname>Cabinio</surname> <given-names>M</given-names></name> <name><surname>Di Tella</surname> <given-names>S</given-names></name> <name><surname>Pazzi</surname> <given-names>S</given-names></name> <name><surname>Vannetti</surname> <given-names>F</given-names></name> <name><surname>Gerli</surname> <given-names>F</given-names></name><etal/></person-group> <article-title>Diagnostic validity of the smart aging serious game: an innovative tool for digital phenotyping of mild neurocognitive disorder</article-title>. <source>J Alzheimers Dis</source>. (<year>2021</year>) <volume>83</volume>(<issue>4</issue>):<fpage>1789</fpage>&#x2013;<lpage>801</lpage>. <pub-id pub-id-type="doi">10.3233/JAD-210347</pub-id><pub-id pub-id-type="pmid">34459394</pub-id></mixed-citation></ref>
<ref id="B116"><label>116.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Batliner</surname> <given-names>A</given-names></name> <name><surname>Neumann</surname> <given-names>M</given-names></name> <name><surname>Burkhardt</surname> <given-names>F</given-names></name> <name><surname>Baird</surname> <given-names>A</given-names></name> <name><surname>Meyer</surname> <given-names>S</given-names></name> <name><surname>Vu</surname> <given-names>NT</given-names></name><etal/></person-group> <article-title>Ethical awareness in paralinguistics: a taxonomy of applications</article-title>. <source>Int J Human Comput Interact</source>. (<year>2022</year>):<fpage>1</fpage>&#x2013;<lpage>18</lpage>.</mixed-citation></ref>
<ref id="B117"><label>117.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sela</surname> <given-names>Y</given-names></name> <name><surname>Santamaria</surname> <given-names>L</given-names></name> <name><surname>Amichai-Hamburger</surname> <given-names>Y</given-names></name> <name><surname>Leong</surname> <given-names>V</given-names></name></person-group>. <article-title>Towards a personalized multi-domain digital neurophenotyping model for the detection and treatment of mood trajectories</article-title>. <source>Sensors</source>. (<year>2020</year>) <volume>20</volume>(<issue>20</issue>):<fpage>5781</fpage>. <pub-id pub-id-type="doi">10.3390/s20205781</pub-id><pub-id pub-id-type="pmid">33053889</pub-id></mixed-citation></ref>
<ref id="B118"><label>118.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Anglade</surname> <given-names>C</given-names></name> <name><surname>Tousignant</surname> <given-names>M</given-names></name> <name><surname>Gaboury</surname> <given-names>I</given-names></name></person-group>. <article-title>Rigorous qualitative research involving data collected remotely from people with communication disorders: experience from a telerehabilitation trial</article-title>. <source>Neurorehabil Neural Repair</source>. (<year>2022</year>) <volume>36</volume>(<issue>8</issue>):<fpage>557</fpage>&#x2013;<lpage>64</lpage>. <pub-id pub-id-type="doi">10.1177/15459683221100489</pub-id><pub-id pub-id-type="pmid">35599591</pub-id></mixed-citation></ref>
<ref id="B119"><label>119.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schuller</surname> <given-names>DM</given-names></name> <name><surname>Schuller</surname> <given-names>BW</given-names></name></person-group>. <article-title>A review on five recent and near-future developments in computational processing of emotion in the human voice</article-title>. <source>Emot Rev</source>. (<year>2021</year>) <volume>13</volume>(<issue>1</issue>):<fpage>44</fpage>&#x2013;<lpage>50</lpage>. <pub-id pub-id-type="doi">10.1177/1754073919898526</pub-id></mixed-citation></ref>
<ref id="B120"><label>120.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Villongco</surname> <given-names>C</given-names></name> <name><surname>Khan</surname> <given-names>F</given-names></name></person-group>. <article-title>&#x201C;Sorry I Didn&#x2019;t hear you&#x201D;. The ethics of voice computing and AI in high risk mental health populations</article-title>. <source>AJOB Neurosci</source>. (<year>2020</year>) <volume>11</volume>(<issue>2</issue>):<fpage>105</fpage>&#x2013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1080/21507740.2020.1740355</pub-id><pub-id pub-id-type="pmid">32228383</pub-id></mixed-citation></ref>
<ref id="B121"><label>121.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Woodward</surname> <given-names>K</given-names></name> <name><surname>Kanjo</surname> <given-names>E</given-names></name> <name><surname>Brown</surname> <given-names>DJ</given-names></name> <name><surname>McGinnity</surname> <given-names>TM</given-names></name> <name><surname>Inkster</surname> <given-names>B</given-names></name> <name><surname>Macintyre</surname> <given-names>DJ</given-names></name><etal/></person-group> <article-title>Beyond mobile apps: a survey of technologies for mental well-being</article-title>. <source>IEEE Trans Affect Comput</source>. (<year>2020</year>) <volume>13</volume>(<issue>3</issue>):<fpage>1216</fpage>&#x2013;<lpage>35</lpage>. <pub-id pub-id-type="doi">10.1109/TAFFC.2020.3015018</pub-id></mixed-citation></ref>
<ref id="B122"><label>122.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nazer</surname> <given-names>LH</given-names></name> <name><surname>Zatarah</surname> <given-names>R</given-names></name> <name><surname>Waldrip</surname> <given-names>S</given-names></name> <name><surname>Ke</surname> <given-names>JXC</given-names></name> <name><surname>Moukheiber</surname> <given-names>M</given-names></name> <name><surname>Khanna</surname> <given-names>AK</given-names></name><etal/></person-group> <article-title>Bias in artificial intelligence algorithms and recommendations for mitigation</article-title>. <source>PLoS Digit Health</source>. (<year>2023</year>) <volume>2</volume>(<issue>6</issue>):<fpage>e0000278</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pdig.0000278</pub-id><pub-id pub-id-type="pmid">37347721</pub-id></mixed-citation></ref>
<ref id="B123"><label>123.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Jokinen</surname> <given-names>K</given-names></name> <name><surname>Declerck</surname> <given-names>T.</given-names></name></person-group> <comment>Researching Less-Resourced Languages&#x2014;the DigiSami Corpus</comment>. (<year>2018</year>). p. <fpage>3382</fpage>&#x2013;<lpage>6</lpage>.</mixed-citation></ref>
<ref id="B124"><label>124.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Casteleyn</surname> <given-names>L</given-names></name> <name><surname>Dumez</surname> <given-names>B</given-names></name> <name><surname>Van Damme</surname> <given-names>K</given-names></name> <name><surname>Anwar</surname> <given-names>WA</given-names></name></person-group>. <article-title>Ethics and data protection in human biomarker studies in environmental health</article-title>. <source>Int J Hyg Environ Health</source>. (<year>2013</year>) <volume>216</volume>(<issue>5</issue>):<fpage>599</fpage>&#x2013;<lpage>605</lpage>. <pub-id pub-id-type="doi">10.1016/j.ijheh.2013.03.016</pub-id><pub-id pub-id-type="pmid">23660231</pub-id></mixed-citation></ref>
<ref id="B125"><label>125.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Karabekmez</surname> <given-names>ME</given-names></name></person-group>. <article-title>Data ethics in digital health and genomics</article-title>. <source>New Bioeth</source>. (<year>2021</year>) <volume>27</volume>(<issue>4</issue>):<fpage>320</fpage>&#x2013;<lpage>33</lpage>. <pub-id pub-id-type="doi">10.1080/20502877.2021.1996965</pub-id><pub-id pub-id-type="pmid">34747348</pub-id></mixed-citation></ref>
<ref id="B126"><label>126.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>CH</given-names></name> <name><surname>Yoon</surname> <given-names>HJ</given-names></name></person-group>. <article-title>Medical big data: promise and challenges</article-title>. <source>Kidney Res Clin Pract</source>. (<year>2017</year>) <volume>36</volume>(<issue>1</issue>):<fpage>3</fpage>&#x2013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.23876/j.krcp.2017.36.1.3</pub-id><pub-id pub-id-type="pmid">28392994</pub-id></mixed-citation></ref>
<ref id="B127"><label>127.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tiffin</surname> <given-names>N</given-names></name> <name><surname>George</surname> <given-names>A</given-names></name> <name><surname>LeFevre</surname> <given-names>AE</given-names></name></person-group>. <article-title>How to use relevant data for maximal benefit with minimal risk: digital health data governance to protect vulnerable populations in low-income and middle-income countries</article-title>. <source>BMJ Glob Health</source>. (<year>2019</year>) <volume>4</volume>(<issue>2</issue>):<fpage>e001395</fpage>. <pub-id pub-id-type="doi">10.1136/bmjgh-2019-001395</pub-id><pub-id pub-id-type="pmid">31139457</pub-id></mixed-citation></ref>
<ref id="B128"><label>128.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cummins</surname> <given-names>N</given-names></name> <name><surname>Scherer</surname> <given-names>S</given-names></name> <name><surname>Krajewski</surname> <given-names>J</given-names></name> <name><surname>Schnieder</surname> <given-names>S</given-names></name> <name><surname>Epps</surname> <given-names>J</given-names></name> <name><surname>Quatieri</surname> <given-names>TF</given-names></name></person-group>. <article-title>A review of depression and suicide risk assessment using speech analysis</article-title>. <source>Speech Commun</source>. (<year>2015</year>) <volume>71</volume>:<fpage>10</fpage>&#x2013;<lpage>49</lpage>. <pub-id pub-id-type="doi">10.1016/j.specom.2015.03.004</pub-id></mixed-citation></ref>
<ref id="B129"><label>129.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rubio</surname> <given-names>DM</given-names></name> <name><surname>Schoenbaum</surname> <given-names>EE</given-names></name> <name><surname>Lee</surname> <given-names>LS</given-names></name> <name><surname>Schteingart</surname> <given-names>DE</given-names></name> <name><surname>Marantz</surname> <given-names>PR</given-names></name> <name><surname>Anderson</surname> <given-names>KE</given-names></name><etal/></person-group> <article-title>Defining translational research: implications for training</article-title>. <source>Acad Med</source>. (<year>2010</year>) <volume>85</volume>(<issue>3</issue>):<fpage>470</fpage>. <pub-id pub-id-type="doi">10.1097/ACM.0b013e3181ccd618</pub-id><pub-id pub-id-type="pmid">20182120</pub-id></mixed-citation></ref>
<ref id="B130"><label>130.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Bensoussan</surname> <given-names>Y</given-names></name> <name><surname>Sigaras</surname> <given-names>A</given-names></name> <name><surname>Rameau</surname> <given-names>A</given-names></name> <name><surname>Elemento</surname> <given-names>O</given-names></name> <name><surname>Powell</surname> <given-names>M</given-names></name> <name><surname>Dorr</surname> <given-names>D</given-names></name><etal/></person-group> <comment>Bridge2AI-Voice: An ethically-sourced, diverse voice dataset linked to health information</comment>. <comment>PhysioNet</comment>. (<year>2025</year>). <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://physionet.org/content/b2ai-voice/2.0.0/">https://physionet.org/content/b2ai-voice/2.0.0/</ext-link> <comment>(Accessed August 15, 2025)</comment>.</mixed-citation></ref>
<ref id="B131"><label>131.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Awan</surname> <given-names>SN</given-names></name> <name><surname>Bahr</surname> <given-names>R</given-names></name> <name><surname>Watts</surname> <given-names>S</given-names></name> <name><surname>Boyer</surname> <given-names>M</given-names></name> <name><surname>Budinsky</surname> <given-names>R</given-names></name> <name><surname>Bensoussan</surname> <given-names>Y</given-names></name></person-group>. <article-title>Evidence-Based recommendations for tablet recordings from the Bridge2AI-voice acoustic experiments</article-title>. <source>J Voice</source>. (<year>2024</year>). <comment>Available online at:</comment> <ext-link ext-link-type="uri" xlink:href="https://www.sciencedirect.com/science/article/pii/S0892199724002832">https://www.sciencedirect.com/science/article/pii/S0892199724002832</ext-link> <comment>(Accessed March 14, 2025)</comment>. <pub-id pub-id-type="doi">10.1016/j.jvoice.2024.08.029</pub-id></mixed-citation></ref>
<ref id="B132"><label>132.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Alberto</surname> <given-names>IRI</given-names></name> <name><surname>Alberto</surname> <given-names>NRI</given-names></name> <name><surname>Ghosh</surname> <given-names>AK</given-names></name> <name><surname>Jain</surname> <given-names>B</given-names></name> <name><surname>Jayakumar</surname> <given-names>S</given-names></name> <name><surname>Martinez-Martin</surname> <given-names>N</given-names></name><etal/></person-group> <article-title>The impact of commercial health datasets on medical research and health-care algorithms</article-title>. <source>Lancet Digit Health</source>. (<year>2023</year>) <volume>5</volume>(<issue>5</issue>):<fpage>e288</fpage>&#x2013;<lpage>94</lpage>. <pub-id pub-id-type="doi">10.1016/S2589-7500(23)00025-0</pub-id><pub-id pub-id-type="pmid">37100543</pub-id></mixed-citation></ref>
<ref id="B133"><label>133.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Daneshjou</surname> <given-names>R</given-names></name> <name><surname>Smith</surname> <given-names>MP</given-names></name> <name><surname>Sun</surname> <given-names>MD</given-names></name> <name><surname>Rotemberg</surname> <given-names>V</given-names></name> <name><surname>Zou</surname> <given-names>J</given-names></name></person-group>. <article-title>Lack of transparency and potential bias in artificial intelligence data sets and algorithms</article-title>. <source>JAMA Dermatol</source>. (<year>2021</year>) <volume>157</volume>(<issue>11</issue>):<fpage>1362</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1001/jamadermatol.2021.3129</pub-id><pub-id pub-id-type="pmid">34550305</pub-id></mixed-citation></ref>
<ref id="B134"><label>134.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Khan</surname> <given-names>B</given-names></name> <name><surname>Fatima</surname> <given-names>H</given-names></name> <name><surname>Qureshi</surname> <given-names>A</given-names></name> <name><surname>Kumar</surname> <given-names>S</given-names></name> <name><surname>Hanan</surname> <given-names>A</given-names></name> <name><surname>Hussain</surname> <given-names>J</given-names></name><etal/></person-group> <article-title>Drawbacks of artificial intelligence and their potential solutions in the healthcare sector</article-title>. <source>Biomed Mater Devices</source>. (<year>2023</year>) <volume>1</volume>(<issue>2</issue>):<fpage>731</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1007/s44174-023-00063-2</pub-id></mixed-citation></ref>
<ref id="B135"><label>135.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Aung</surname> <given-names>YYM</given-names></name> <name><surname>Wong</surname> <given-names>DCS</given-names></name> <name><surname>Ting</surname> <given-names>DSW</given-names></name></person-group>. <article-title>The promise of artificial intelligence: a review of the opportunities and challenges of artificial intelligence in healthcare</article-title>. <source>Br Med Bull</source>. (<year>2021</year>) <volume>139</volume>(<issue>1</issue>):<fpage>4</fpage>&#x2013;<lpage>15</lpage>. <pub-id pub-id-type="doi">10.1093/bmb/ldab016</pub-id><pub-id pub-id-type="pmid">34405854</pub-id></mixed-citation></ref>
<ref id="B136"><label>136.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ng</surname> <given-names>MY</given-names></name> <name><surname>Youssef</surname> <given-names>A</given-names></name> <name><surname>Miner</surname> <given-names>AS</given-names></name> <name><surname>Sarellano</surname> <given-names>D</given-names></name> <name><surname>Long</surname> <given-names>J</given-names></name> <name><surname>Larson</surname> <given-names>DB</given-names></name><etal/></person-group> <article-title>Perceptions of data set experts on important characteristics of health data sets ready for machine learning: a qualitative study</article-title>. <source>JAMA Netw Open</source>. (<year>2023</year>) <volume>6</volume>(<issue>12</issue>):<fpage>e2345892</fpage>. <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2023.45892</pub-id><pub-id pub-id-type="pmid">38039004</pub-id></mixed-citation></ref>
<ref id="B137"><label>137.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Evangelista</surname> <given-names>EG</given-names></name> <name><surname>B&#x00E9;lisle-Pipon</surname> <given-names>JC</given-names></name> <name><surname>Naunheim</surname> <given-names>MR</given-names></name> <name><surname>Powell</surname> <given-names>M</given-names></name> <name><surname>Gallois</surname> <given-names>H</given-names></name></person-group>, <collab>Bridge2AI-Voice Consortium</collab>, <etal>et al.</etal> <article-title>Voice as a biomarker in health-tech: mapping the evolving landscape of voice biomarkers in the start-up world</article-title>. <source>Otolaryngol Head Neck Surg Off J Am Acad Otolaryngol Head Neck Surg</source>. (<year>2024</year>) <volume>171</volume>(<issue>2</issue>):<fpage>340</fpage>&#x2013;<lpage>52</lpage>. <pub-id pub-id-type="doi">10.1002/ohn.830</pub-id></mixed-citation></ref>
<ref id="B138"><label>138.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>H</given-names></name> <name><surname>Friedman</surname> <given-names>ME</given-names></name> <name><surname>Cukor</surname> <given-names>P</given-names></name> <name><surname>Ahern</surname> <given-names>D</given-names></name></person-group>. <article-title>Interactive voice response system (IVRS) in health care services</article-title>. <source>Nurs Outlook</source>. (<year>2003</year>) <volume>51</volume>(<issue>6</issue>):<fpage>277</fpage>&#x2013;<lpage>83</lpage>. <pub-id pub-id-type="doi">10.1016/S0029-6554(03)00161-1</pub-id><pub-id pub-id-type="pmid">14688763</pub-id></mixed-citation></ref>
<ref id="B139"><label>139.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Paik</surname> <given-names>KE</given-names></name> <name><surname>Hicklen</surname> <given-names>R</given-names></name> <name><surname>Kaggwa</surname> <given-names>F</given-names></name> <name><surname>Puyat</surname> <given-names>CV</given-names></name> <name><surname>Nakayama</surname> <given-names>LF</given-names></name> <name><surname>Ong</surname> <given-names>BA</given-names></name><etal/></person-group> <article-title>Digital determinants of health: health data poverty amplifies existing health disparities&#x2014;a scoping review</article-title>. <source>PLoS Digit Health</source>. (<year>2023</year>) <volume>2</volume>(<issue>10</issue>):<fpage>e0000313</fpage>. <pub-id pub-id-type="doi">10.1371/journal.pdig.0000313</pub-id><pub-id pub-id-type="pmid">37824445</pub-id></mixed-citation></ref>
<ref id="B140"><label>140.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Coppock</surname> <given-names>H</given-names></name> <name><surname>Jones</surname> <given-names>L</given-names></name> <name><surname>Kiskin</surname> <given-names>I</given-names></name> <name><surname>Schuller</surname> <given-names>B</given-names></name></person-group>. <article-title>COVID-19 detection from audio: seven grains of salt</article-title>. <source>Lancet Digit Health</source>. (<year>2021</year>) <volume>3</volume>(<issue>9</issue>):<fpage>e537</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1016/S2589-7500(21)00141-2</pub-id><pub-id pub-id-type="pmid">34303644</pub-id></mixed-citation></ref>
<ref id="B141"><label>141.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><collab>The Lancet Digital Health</collab></person-group>. <article-title>Do I sound sick?</article-title> <source>Lancet Digit Health</source>. (<year>2021</year>) <volume>3</volume>(<issue>9</issue>):<fpage>e534</fpage>.<pub-id pub-id-type="pmid">34446262</pub-id></mixed-citation></ref>
<ref id="B142"><label>142.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Corple</surname> <given-names>DJ</given-names></name> <name><surname>Linabary</surname> <given-names>JR</given-names></name></person-group>. <article-title>From data points to people: feminist situated ethics in online big data research</article-title>. <source>Int J Soc Res Methodol</source>. (<year>2020</year>) <volume>23</volume>(<issue>2</issue>):<fpage>155</fpage>&#x2013;<lpage>68</lpage>. <pub-id pub-id-type="doi">10.1080/13645579.2019.1649832</pub-id></mixed-citation></ref>
<ref id="B143"><label>143.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Montiel</surname> <given-names>CJ</given-names></name> <name><surname>Uyheng</surname> <given-names>J</given-names></name></person-group>. <article-title>Foundations for a decolonial big data psychology</article-title>. <source>J Soc Issues</source>. (<year>2022</year>) <volume>78</volume>(<issue>2</issue>):<fpage>278</fpage>&#x2013;<lpage>97</lpage>. <pub-id pub-id-type="doi">10.1111/josi.12439</pub-id></mixed-citation></ref>
<ref id="B144"><label>144.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cheung</surname> <given-names>K</given-names></name> <name><surname>Earp</surname> <given-names>BD</given-names></name> <name><surname>Patch</surname> <given-names>K</given-names></name> <name><surname>Yaden</surname> <given-names>DB</given-names></name></person-group>. <article-title>Distinctive but not exceptional: the risks of psychedelic ethical exceptionalism</article-title>. <source>Am J Bioeth</source>. (<year>2025</year>) <volume>25</volume>(<issue>1</issue>):<fpage>16</fpage>&#x2013;<lpage>28</lpage>. <pub-id pub-id-type="doi">10.1080/15265161.2024.2433421</pub-id><pub-id pub-id-type="pmid">39804307</pub-id></mixed-citation></ref>
<ref id="B145"><label>145.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Garrison</surname> <given-names>NA</given-names></name> <name><surname>Brothers</surname> <given-names>KB</given-names></name> <name><surname>Goldenberg</surname> <given-names>AJ</given-names></name> <name><surname>Lynch</surname> <given-names>JA</given-names></name></person-group>. <article-title>Genomic contextualism: shifting the rhetoric of genetic exceptionalism</article-title>. <source>Am J Bioeth</source>. (<year>2019</year>) <volume>19</volume>(<issue>1</issue>):<fpage>51</fpage>&#x2013;<lpage>63</lpage>. <pub-id pub-id-type="doi">10.1080/15265161.2018.1544304</pub-id><pub-id pub-id-type="pmid">30676903</pub-id></mixed-citation></ref>
<ref id="B146"><label>146.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Murray</surname> <given-names>TH</given-names></name></person-group>. <article-title>Is genetic exceptionalism past its sell-by date? On genomic diaries, context, and content</article-title>. <source>Am J Bioeth</source>. (<year>2019</year>) <volume>19</volume>(<issue>1</issue>):<fpage>13</fpage>&#x2013;<lpage>5</lpage>. <pub-id pub-id-type="doi">10.1080/15265161.2018.1552038</pub-id><pub-id pub-id-type="pmid">30676900</pub-id></mixed-citation></ref>
<ref id="B147"><label>147.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shevchenko</surname> <given-names>S</given-names></name> <name><surname>Zhavoronkov</surname> <given-names>A</given-names></name></person-group>. <article-title>The role of exceptionalism in the evolution of bioethical regulation</article-title>. <source>Camb Q Healthc Ethics</source>. (<year>2024</year>) <volume>33</volume>(<issue>2</issue>):<fpage>185</fpage>&#x2013;<lpage>97</lpage>. <pub-id pub-id-type="doi">10.1017/S0963180123000336</pub-id><pub-id pub-id-type="pmid">37288492</pub-id></mixed-citation></ref>
<ref id="B148"><label>148.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Alugubelli</surname> <given-names>R</given-names></name></person-group>. <article-title>Exploratory study of artificial intelligence in healthcare</article-title>. <source>Int J Innov Eng Res Technol</source>. (<year>2016</year>) <volume>3</volume>(<issue>1</issue>):<fpage>1</fpage>&#x2013;<lpage>10</lpage>.</mixed-citation></ref>
<ref id="B149"><label>149.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mandl</surname> <given-names>KD</given-names></name> <name><surname>Manrai</surname> <given-names>AK</given-names></name></person-group>. <article-title>Potential excessive testing at scale: biomarkers, genomics, and machine learning</article-title>. <source>JAMA</source>. (<year>2019</year>) <volume>321</volume>(<issue>8</issue>):<fpage>739</fpage>&#x2013;<lpage>40</lpage>. <pub-id pub-id-type="doi">10.1001/jama.2019.0286</pub-id><pub-id pub-id-type="pmid">30735228</pub-id></mixed-citation></ref>
<ref id="B150"><label>150.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Panagoulias</surname> <given-names>DP</given-names></name> <name><surname>Virvou</surname> <given-names>M</given-names></name> <name><surname>Tsihrintzis</surname> <given-names>GA</given-names></name></person-group>. <article-title>Regulation and validation challenges in artificial intelligence-empowered healthcare applications&#x2014;the case of blood-retrieved biomarkers</article-title>. In: <person-group person-group-type="editor"><name><surname>Virvou</surname> <given-names>M</given-names></name> <name><surname>Saruwatari</surname> <given-names>T</given-names></name> <name><surname>Jain</surname> <given-names>LC</given-names></name></person-group>, editors. <source>Knowledge-Based Software Engineering: 2022</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name> (<year>2023</year>). p. <fpage>97</fpage>&#x2013;<lpage>110</lpage>. <comment>(Learning and Analytics in Intelligent Systems)</comment>.</mixed-citation></ref>
<ref id="B151"><label>151.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Vazquez-Levin</surname> <given-names>MH</given-names></name> <name><surname>Reventos</surname> <given-names>J</given-names></name> <name><surname>Zaki</surname> <given-names>G</given-names></name></person-group>. <article-title>Editorial: artificial intelligence: a step forward in biomarker discovery and integration towards improved cancer diagnosis and treatment</article-title>. <source>Front Oncol</source>. (<year>2023</year>) <volume>13</volume>:<fpage>1161118</fpage>. <pub-id pub-id-type="doi">10.3389/fonc.2023.1161118</pub-id><pub-id pub-id-type="pmid">37064106</pub-id></mixed-citation></ref>
<ref id="B152"><label>152.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gallois</surname> <given-names>H</given-names></name> <name><surname>Ivkovic</surname> <given-names>L</given-names></name> <name><surname>Evangelista</surname> <given-names>E</given-names></name> <name><surname>Bensoussan</surname> <given-names>Y</given-names></name> <name><surname>Dorr</surname> <given-names>DA</given-names></name> <name><surname>Elemento</surname> <given-names>O</given-names></name><etal/></person-group> <article-title>&#x201C;Low risk, high happiness&#x201D;: a review of openly declared ethical and legal practices in voice biomarker health-tech start-ups</article-title>. <source>Health Care Anal</source>. (<year>2025</year>). <comment>Available online at:</comment> <pub-id pub-id-type="doi">10.1007/s10728-025-00539-w</pub-id> <comment>(Accessed November 7, 2025)</comment>.<pub-id pub-id-type="pmid">41091343</pub-id></mixed-citation></ref>
<ref id="B153"><label>153.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Blatter</surname> <given-names>A</given-names></name> <name><surname>Gallois</surname> <given-names>H</given-names></name> <name><surname>Evangelista</surname> <given-names>E</given-names></name> <name><surname>Bensoussan</surname> <given-names>Y</given-names></name></person-group>, <collab>Bridge2AI-Voice Consortium</collab>, <person-group person-group-type="author"><name><surname>B&#x00E9;lisle-Pipon</surname> <given-names>JC</given-names></name></person-group>. <article-title>&#x201C;Voice is the new blood&#x201D;: a discourse analysis of voice AI health-tech start-up websites</article-title>. <source>Front Digit Health</source>. (<year>2025</year>) <volume>7</volume>:<fpage>1568159</fpage>. <pub-id pub-id-type="doi">10.3389/fdgth.2025.1568159</pub-id><pub-id pub-id-type="pmid">40510414</pub-id></mixed-citation></ref>
<ref id="B154"><label>154.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wilson</surname> <given-names>P</given-names></name></person-group>. <article-title>The voice and its metaphors</article-title>. <source>Aust Voice</source>. (<year>2004</year>) <volume>10</volume>:<fpage>16</fpage>&#x2013;<lpage>9</lpage>.</mixed-citation></ref>
<ref id="B155"><label>155.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cummins</surname> <given-names>N</given-names></name> <name><surname>Dineley</surname> <given-names>J</given-names></name> <name><surname>Conde</surname> <given-names>P</given-names></name> <name><surname>Matcham</surname> <given-names>F</given-names></name> <name><surname>Siddi</surname> <given-names>S</given-names></name> <name><surname>Lamers</surname> <given-names>F</given-names></name><etal/></person-group> <article-title>Multilingual markers of depression in remotely collected speech samples: a preliminary analysis</article-title>. <source>J Affect Disord</source>. (<year>2023</year>) <volume>341</volume>:<fpage>128</fpage>&#x2013;<lpage>36</lpage>. <pub-id pub-id-type="doi">10.1016/j.jad.2023.08.097</pub-id><pub-id pub-id-type="pmid">37598722</pub-id></mixed-citation></ref>
<ref id="B156"><label>156.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ritter</surname> <given-names>E</given-names></name></person-group>. <article-title>Your voice gave you away: the privacy risks of voice-inferred information</article-title>. <source>Duke Law J</source>. (<year>2021</year>) <volume>71</volume>(<issue>3</issue>).</mixed-citation></ref></ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by"><p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2540024/overview">Boluwaji Ade Akinnuwesi</ext-link>, University of Eswatini, Eswatini</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by"><p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1517165/overview">Michaela Th. Mayrhofer</ext-link>, Papillon Pathways e.U., Austria</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3314953/overview">Margaret McDonald</ext-link>, VNS Health, United States</p></fn>
</fn-group>
<fn-group>
<fn id="n3"><p><sup>1</sup>Voice, or vocalization, is the sound made by the vibration between vocal folds in the larynx when air is pushed by the lungs (<xref ref-type="bibr" rid="B2">2</xref>). Speech, on the other hand, refers to the human vocalization made by the coordinated muscle movement of the lips and tongue that shapes recognizable sounds (<xref ref-type="bibr" rid="B5">5</xref>). Speech also involves prosody, lexicality and linguistic features like spoken words that can also contain potential biomarkers of diseases (<xref ref-type="bibr" rid="B6">6</xref>). However, for easier comprehension, this article will refer to voice and speech as one and the same.</p></fn>
</fn-group>
</back>
</article>