<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Cognit.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Cognition</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Cognit.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">2813-4532</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fcogn.2026.1638501</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Gender identity impacts the perception of vocal congruence</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>De Livio</surname> <given-names>Chiara</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3061384"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Mazzuca</surname> <given-names>Claudia</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/391617"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Fini</surname> <given-names>Chiara</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn001"><sup>&#x02020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/110622"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
</contrib>
<contrib contrib-type="author">
<name><surname>Borghi</surname> <given-names>Anna M.</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/8484"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Department of Psychology, Sapienza University of Rome</institution>, <city>Rome</city>, <country country="IT">Italy</country></aff>
<aff id="aff2"><label>2</label><institution>Department of Dynamic and Clinical Psychology and Health Studies, Sapienza University of Rome</institution>, <city>Rome</city>, <country country="IT">Italy</country></aff>
<aff id="aff3"><label>3</label><institution>Institute of Cognitive Sciences and Technologies, Italian National Research Council</institution>, <city>Rome</city>, <country country="IT">Italy</country></aff>
<author-notes>
<corresp id="c001"><label>&#x0002A;</label>Correspondence: Chiara De Livio, <email xlink:href="mailto:chiara.delivio@uniroma1.it">chiara.delivio@uniroma1.it</email></corresp>
<fn fn-type="present-address" id="fn001"><label>&#x02020;</label><p>Present address: Chiara Fini, Faculty of Human Sciences, Education and Sport, UniPegaso, Italy</p></fn></author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-03-10">
<day>10</day>
<month>03</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>5</volume>
<elocation-id>1638501</elocation-id>
<history>
<date date-type="received">
<day>30</day>
<month>05</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>24</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>02</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2026 De Livio, Mazzuca, Fini and Borghi.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>De Livio, Mazzuca, Fini and Borghi</copyright-holder>
<license>
<ali:license_ref start_date="2026-03-10">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>This study investigated vocal congruence, i.e., the alignment between self-voice perception and the sense of identity, across cisgender and transgender and gender non-conforming (TGNC) participants (<italic>N</italic> = 44) in three conditions: Silent Reading, Reading Aloud, and Listening to recorded speech. Results revealed that TGNC participants reported significantly lower vocal congruence than cisgender participants across all experimental conditions, with the starkest difference in conditions where auditory feedback was present. This experience of incongruence appears to be modulated by interoceptive sensibility and alexithymia, with TGNC individuals reporting lower interoceptive trust and higher levels of alexithymia. Emotional awareness was positively linked to inner-voice congruence in the TGNC group. Additionally, aspects related to gender-minority stress predicted lower congruence. These findings highlight the complex interplay between gender identity, interoception, emotion regulation strategies, and voice perception.</p></abstract>
<kwd-group>
<kwd>alexithymia</kwd>
<kwd>gender identity</kwd>
<kwd>inner speech</kwd>
<kwd>interoception</kwd>
<kwd>TGNC</kwd>
<kwd>vocal congruence</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by Sapienza&#x00027;s Excellent Project Grant no. RG123188B09BDF14: &#x0201C;Ecological Technological, and Gender Concepts and their Variations Across the Lifespan - 2022-24&#x0201D; led by AB. CD was funded by the National Recovery and Resilience Plan (PNRR) DM no. 118, March 03, 2023. CF and AB were supported with funding from Next Generation EU, in the context of the National Recovery and Resilience Plan, Investment PE8 &#x02013; Project Age-It: &#x0201C;Ageing Well in an Ageing Society.&#x0201D; CM and AB were supported by the project PRIN DECO (DEmocratizing COncepts), protocol no. P2022ARREH.</funding-statement>
</funding-group>
<counts>
<fig-count count="5"/>
<table-count count="5"/>
<equation-count count="0"/>
<ref-count count="121"/>
<page-count count="18"/>
<word-count count="15344"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Perception</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>In the introduction of this paper, we first address how humans&#x00027; bodily sense of self develops, highlighting the role of interoceptive, social, and environmental aspects. We then argue that the voice constitutes a central sensorimotor trait in which individual and social dimensions intersect, representing a crucial component of self-identity. In this context, we discuss the mechanisms underlying the experience of vocal congruence across different modes of voice perception. Finally, we examine how vocal congruence may vary across populations with different gender identities, which constitutes the primary aim of the present study, and we describe the behavioral self-voice perception paradigm employed to investigate this question.</p>
<sec>
<label>1.1</label>
<title>Bodily self-concept, interoception, and social context</title>
<p>Bodies serve as the primary interface with the external world and constitute the foundation of the human sense of self. Body awareness arises from the ability to integrate multisensory information from the inside and outside of the body (<xref ref-type="bibr" rid="B109">Tsakiris et al., 2011</xref>), and the development of body awareness begins with body ownership, which involves the primal recognition that one&#x00027;s body is the source of sensations, and the sense of agency (<xref ref-type="bibr" rid="B110">Tsakiris et al., 2006</xref>). Human sense of body ownership is dynamically influenced by both internal sensations and external interactions (<xref ref-type="bibr" rid="B62">Martel et al., 2016</xref>; <xref ref-type="bibr" rid="B109">Tsakiris et al., 2011</xref>), and distinguishing between them is a fundamental yet complex task (<xref ref-type="bibr" rid="B119">Zaidel and Salomon, 2023</xref>). Interoceptive sensibility can be considered as an expression of a high-level model or &#x0201C;belief&#x0201D; for generating predictions about information coming from inside the body (<xref ref-type="bibr" rid="B6">Barrett and Simmons, 2015</xref>; <xref ref-type="bibr" rid="B22">Critchley and Garfinkel, 2017</xref>; <xref ref-type="bibr" rid="B84">Pezzulo, 2014</xref>; <xref ref-type="bibr" rid="B100">Seth, 2013</xref>).</p>
<p>Attention plays a role in interoceptive processes by modulating the flow between top-down prior beliefs and bottom-up signal flow (<xref ref-type="bibr" rid="B100">Seth, 2013</xref>). This attentional process on the ongoing bodily experience can be adaptive, but proneness to rumination&#x02014;i.e., maladaptive repetitive thinking about past events&#x02014;on somatic cues can lead to maladaptive behaviors (<xref ref-type="bibr" rid="B73">Naraindas et al., 2023</xref>; <xref ref-type="bibr" rid="B67">Mehling et al., 2012</xref>, <xref ref-type="bibr" rid="B65">2018</xref>). Beyond internal experience, expectations and beliefs also influence body awareness. Beliefs about the body are frequently associated with cultural-specific representations about a desired body (<xref ref-type="bibr" rid="B15">Cattarin et al., 2000</xref>; <xref ref-type="bibr" rid="B94">Ricciardelli et al., 2007</xref>; <xref ref-type="bibr" rid="B107">Thompson et al., 2005</xref>), which might lead to experiences of anxiety and avoidance of social situations.</p>
<p>Novel evidence indeed supports the idea of a deep interconnection between body awareness and self-awareness, and between social interaction and body-awareness (<xref ref-type="bibr" rid="B27">De Vignemont, 2023</xref>). Interoception gives the concept of self a solid foundation, enhancing its coherence over time by reducing its susceptibility to external influences (see <xref ref-type="bibr" rid="B3">Arnold et al., 2019</xref> for a review). On the other hand, social situations may reduce interoceptive processing by shifting attention from internally- to externally-focused. Higher interoceptive abilities would mediate the ability to &#x0201C;self-regulate&#x0201D; in social situations by flexibly directing attention toward both internal and external information; the link between interoception and the social self is thus bi-directional (<xref ref-type="bibr" rid="B71">Monti et al., 2022</xref>).</p>
</sec>
<sec>
<label>1.2</label>
<title>The voice as a sensorimotor trait of self and identity</title>
<p>The sound of voice is embroidered in human body schema, and the ability to discriminate between self- and non-self voice cues is fundamental for self-awareness and self-monitoring during verbal communication (<xref ref-type="bibr" rid="B13">Candini et al., 2018</xref>; <xref ref-type="bibr" rid="B20">Conde et al., 2018</xref>; <xref ref-type="bibr" rid="B32">Fernyhough and Russell, 1997</xref>; <xref ref-type="bibr" rid="B33">Graux et al., 2015</xref>; <xref ref-type="bibr" rid="B117">Xu et al., 2013</xref>). Rather than being just a vehicle of verbal communication, the voice can be considered a salient marker of individuality (<xref ref-type="bibr" rid="B8">Belin et al., 2004</xref>; <xref ref-type="bibr" rid="B23">Crow et al., 2021</xref>), and centers on a domain where the body intersects with cultural processes of socialization and identity formation (<xref ref-type="bibr" rid="B121">Zimman, 2018</xref>). The voice is both an inner bodily signal (i.e., the embodied experience of perceiving oneself speaking) and a means by which individuals transmit information about themselves to interact with others. As such, the voice serves as a bridge between the self and others.</p>
<p>Perceiving one&#x00027;s voice is part of human everyday experience. Specifically, the experience of speech production requires the multimodal integration of different sources of information: auditory, proprioceptive, tactile, and barometric (<xref ref-type="bibr" rid="B51">Kent, 2024</xref>; <xref ref-type="bibr" rid="B44">Ito et al., 2009</xref>). The internal model of speech, or <italic>somatorepresentation</italic>, reflects how we experience and monitor speech-related sensations (<xref ref-type="bibr" rid="B35">Haggard and de Boer, 2014</xref>). This system continually adjusts speech output by comparing expected and actual sensory input, updating its internal models to reduce mismatches (<xref ref-type="bibr" rid="B51">Kent, 2024</xref>). This is particularly important given that individuals experience their own voice in multiple, qualitatively distinct ways. These range from everyday overt speech, to the perception of one&#x00027;s voice during playback of recorded speech (e.g., audio messages), to the experience of interacting with themselves through inner speech (<xref ref-type="bibr" rid="B1">Alderson-Day and Fernyhough, 2015</xref>; <xref ref-type="bibr" rid="B58">L&#x00153;venbruck et al., 2018</xref>; <xref ref-type="bibr" rid="B72">Morin, 2005</xref>), or silently reading (<xref ref-type="bibr" rid="B53">Kunz et al., 2025</xref>; <xref ref-type="bibr" rid="B82">Perrone-Bertolotti et al., 2012</xref>; <xref ref-type="bibr" rid="B113">Vilhauer, 2016</xref>; <xref ref-type="bibr" rid="B118">Yao et al., 2011</xref>). Despite the differing sensory and cognitive demands of these contexts, individuals typically experience a stable and unified sense of vocal identity.</p>
</sec>
<sec>
<label>1.3</label>
<title>Vocal congruence and the embodied experience of voice</title>
<p>Although the literature mentioned above has focused on the role of the voice in shaping self-identity and social interactions, not much is known about the mechanism underlying the perception of different instances of one&#x00027;s voice.</p>
<p>A recent survey of 1,522 U.S. participants revealed that nearly 40% of respondents reported discontent with their voice, with no correlation to voice disorders (<xref ref-type="bibr" rid="B75">Naunheim et al., 2023</xref>). A recent update (<xref ref-type="bibr" rid="B74">Naunheim et al., 2024</xref>) showed an increase in voice complaints from 2012 to 2022, suggesting a possible association between increased voice dissatisfaction and the increased habits of self-recording audio-video contents and remote work, which became widespread globally during and after the COVID-19 pandemic. This incongruence arises from the physical transformation that the voice goes through while speaking: hearing one&#x00027;s voice is mediated both by air and bone conduction, while the voice other people hear is only conducted through air (<xref ref-type="bibr" rid="B77">Orepic et al., 2023</xref>; <xref ref-type="bibr" rid="B86">P&#x000F6;rschmann, 2000</xref>; <xref ref-type="bibr" rid="B104">Stenfelt, 2016</xref>).</p>
<p>In order to define the connection between the embodied experience of having a voice and the auditory experience of hearing one&#x00027;s voice, Crow and colleagues introduced the concept of <italic>vocal congruence</italic>, defined as &#x0201C;&#x02026;the extent to which one&#x00027;s voice is in alignment, or congruent, with one&#x00027;s sense of self&#x0201D; (<xref ref-type="bibr" rid="B23">Crow et al., 2021</xref>, p. 1). To measure this process, they designed a self-report measure of vocal congruence, the Vocal Congruence Scale (VCS). The instrument uses a five-point Likert scale to investigate the degree of individual identification with the voice and the beliefs about the reflection between the voice and the personhood. In addition, the authors showed through a heartbeat detection task (i.e., a classical interoceptive task in which participants are required to judge whether heartbeat sensations are simultaneous with external stimuli presented at different time delays) that the scale has moderate to low correlations with metacognitive judgments of interoceptive awareness and confidence, suggesting that voice perception is related to consciously focusing attention and reflecting upon bodily experience.</p>
<p>This scale captures different faces of the vocal congruence experience, ranging from voice ownership, agency, and control, to functional vocal use and expressiveness in social contexts. Crucially, it addresses identity-related dimensions of vocal congruence such as voice-identity congruence and alignment with one&#x00027;s gender identity. Finally, the scale also investigates metacognitive and evaluative components, including awareness and satisfaction and/or rumination with one&#x00027;s voice. Vocal congruence can thus be understood as a multifaceted phenomenon arising from the perceptual experience of receiving auditory vocal feedback that can match or mismatch with one&#x00027;s intended vocal production. This perceptual mismatch may trigger both an evaluative and metacognitive judgment, as well as an affective response to this experience.</p>
</sec>
<sec>
<label>1.4</label>
<title>Voice perception and TGNC identities</title>
<p>Gender plays an important role in shaping the individual&#x00027;s bodily experience. Gender identity is typically conceptualized as a person&#x00027;s inherent sense of their own gender, and how they identify as a woman, a man, both, an alternative gender, or neither (<xref ref-type="bibr" rid="B2">American Psychological Association, 2015</xref>). Here, in line with previous literature, we use the label TGNC to refer to transgender and gender nonconforming individuals, i.e., people whose gender identity varies from assumptions based on their birth sex (<xref ref-type="bibr" rid="B2">American Psychological Association, 2015</xref>). Although this label is not comprehensive, we believe it provides a valuable and respectful shorthand for referring to a diverse range of gender identities.</p>
<p>TGNC individuals face continuous discrimination, inequality, and social stigma in all aspects of their lives (<xref ref-type="bibr" rid="B14">Carmel and Erickson-Schroth, 2016</xref>; <xref ref-type="bibr" rid="B21">Connolly et al., 2016</xref>; <xref ref-type="bibr" rid="B30">Drabish and Theeke, 2022</xref>; <xref ref-type="bibr" rid="B85">Pinna et al., 2022</xref>; <xref ref-type="bibr" rid="B95">Romani et al., 2021</xref>; <xref ref-type="bibr" rid="B96">Russell and Fish, 2016</xref>; <xref ref-type="bibr" rid="B105">Testa et al., 2015</xref>; <xref ref-type="bibr" rid="B108">Truszczynski et al., 2022</xref>). Previous research highlighted the higher prevalence of alexithymia, body-image disorders, somatization, body uneasiness, and emotion dysregulation within the TGNC population compared to cisgender peers (<xref ref-type="bibr" rid="B11">Budge, 2020</xref>; <xref ref-type="bibr" rid="B39">Hatzenbuehler, 2009</xref>; <xref ref-type="bibr" rid="B48">Kallitsounaki and Williams, 2023</xref>; <xref ref-type="bibr" rid="B64">McGuire et al., 2016</xref>; <xref ref-type="bibr" rid="B69">Mirabella et al., 2020</xref>, <xref ref-type="bibr" rid="B68">2024a</xref>; <xref ref-type="bibr" rid="B61">Maniaci et al., 2024</xref>; <xref ref-type="bibr" rid="B63">Mazzoli et al., 2022</xref>; <xref ref-type="bibr" rid="B105">Testa et al., 2015</xref>). Body dissatisfaction among individuals with gender incongruence is a major source of distress, encompassing psychological, physical, and biological dimensions and leading to behaviors such as avoidance, body surveillance, and feelings of detachment from the body (<xref ref-type="bibr" rid="B69">Mirabella et al., 2020</xref>, <xref ref-type="bibr" rid="B68">2024a</xref>). Discomfort may involve not just secondary sexual characteristics but also non-sexual body parts and may represent a profound concern about gender discrepancies and societal norms (<xref ref-type="bibr" rid="B90">Pu et al., 2025</xref>).</p>
<p>The voice is one of the bodily aspects most commonly reported by TGNC people as a source of incongruence and distress (<xref ref-type="bibr" rid="B16">Chadwick et al., 2022</xref>; <xref ref-type="bibr" rid="B50">Kennedy and Thibeault, 2020</xref>; <xref ref-type="bibr" rid="B45">James et al., 2016</xref>; <xref ref-type="bibr" rid="B76">Oestreicher-Kedem et al., 2024</xref>; <xref ref-type="bibr" rid="B90">Pu et al., 2025</xref>; <xref ref-type="bibr" rid="B111">van de Grift et al., 2016a</xref>,<xref ref-type="bibr" rid="B112">b</xref>; <xref ref-type="bibr" rid="B120">Ziltzer et al., 2023</xref>). Vocal cues can disclose information related to personal identity like provenience, age, or gender, and for TGNC people, they can represent possible unwanted episodes of exposure of their birth sex (<xref ref-type="bibr" rid="B17">Chang and Yung, 2021</xref>). TGNC individuals report the need to find and develop a voice and communication that reflect their individual&#x00027;s sense of gender, and often seek speech therapists since their outer voice does not match with their &#x0201C;inner&#x0201D; and &#x0201C;true&#x0201D; voice (<xref ref-type="bibr" rid="B25">Davies et al., 2015</xref>). In fact, <italic>gender dysphonia</italic> (i.e., the perception of one&#x00027;s gender identity as being inconsistent with the qualities of one&#x00027;s voice and communication; <xref ref-type="bibr" rid="B26">De Bruin et al., 2000</xref>; <xref ref-type="bibr" rid="B50">Kennedy and Thibeault, 2020</xref>), has been shown to impact the quality of life and everyday functioning of TGNC individuals (<xref ref-type="bibr" rid="B36">Hancock et al., 2011</xref>; <xref ref-type="bibr" rid="B37">Hancock and Pool, 2017</xref>; <xref ref-type="bibr" rid="B45">James et al., 2016</xref>; <xref ref-type="bibr" rid="B29">dos Santos Oliveira et al., 2024</xref>). 
These aspects intertwine with physiological impairments related to pitch, voice quality, inflections, resonance, and precision in the articulation, intensity, and prosody (<xref ref-type="bibr" rid="B25">Davies et al., 2015</xref>), as well as the psychological reactions to them (<xref ref-type="bibr" rid="B37">Hancock and Pool, 2017</xref>). Within people identifying as TGNC there is great variation in the extent to which voice changes are undertaken or desired. Some people within this community seek to develop both masculine and feminine speech patterns, either because they identify as bigender or due to external pressures preventing them from fully expressing their gender identity. Others may have a gender identity that does not fit within the traditional woman/man spectrum and desire a more flexible gender presentation.</p>
<p>Culturally inherited stereotypes about what is considered feminine or masculine are often enacted through language (<xref ref-type="bibr" rid="B18">Charlesworth et al., 2021</xref>; <xref ref-type="bibr" rid="B47">Jones et al., 2020</xref>; <xref ref-type="bibr" rid="B57">Lindqvist et al., 2019</xref>; <xref ref-type="bibr" rid="B56">Lewis and Lupyan, 2020</xref>; <xref ref-type="bibr" rid="B99">Sczesny et al., 2016</xref>; <xref ref-type="bibr" rid="B101">Skewes et al., 2018</xref>). Studies with priming paradigms showed that linguistic gender cues can impact both explicit and implicit associations and attitudes of stereotyped group members (<xref ref-type="bibr" rid="B83">Pesciarelli et al., 2019</xref>; <xref ref-type="bibr" rid="B103">Steele and Ambady, 2006</xref>), and cause self-perception and social behavior to become more congruent to gender stereotypes (<xref ref-type="bibr" rid="B43">Hundhammer and Mussweiler, 2012</xref>). However, to date the impact of gendered stereotypes conveyed by language on vocal congruence remains underexplored. Deepening the complex interweaving between the internal bodily experience and cultural constraints might be crucial for the understanding of the TGNC people&#x00027;s experience of gender.</p>
</sec>
<sec>
<label>1.5</label>
<title>The current study</title>
<p>Little is known about how voice self-perception affects social interactions and, reciprocally, how living in a socio-cultural milieu&#x02014;where representations of gender identity within a cultural system can shape how individuals perceive themselves&#x02014;affects the experience of perceiving one&#x00027;s own voice (<xref ref-type="bibr" rid="B42">Hughes and Harrison, 2013</xref>; <xref ref-type="bibr" rid="B80">Peng et al., 2019</xref>). To our knowledge, no existing study tackles the relationship between inner and outer voice perception across different gender identities. Furthermore, no research has systematically examined how gender stereotypes regarding the expected sound of female or male voices influence vocal congruence across diverse gender identities. This is striking since the experience of voice incongruence, i.e., the mismatch between the voice perceived while speaking and the actual voice that other people hear (or one&#x00027;s own recorded voice from a voice message), is a common everyday experience also for cisgender people (<xref ref-type="bibr" rid="B41">Holzman and Rousey, 1966</xref>).</p>
<p>In this study, we investigate vocal congruence across populations with different gender identities (cisgender and TGNC) using a behavioral self-voice perception task. We build on existing studies of vocal congruence by introducing an additional condition&#x02014;the perception of inner voice congruence&#x02014;to previously studied scenarios, such as reading aloud and listening to one&#x00027;s own recorded voice (<xref ref-type="bibr" rid="B23">Crow et al., 2021</xref>; <xref ref-type="bibr" rid="B114">Welch and Helou, 2022</xref>). Importantly, participants are presented with excerpts of texts conveying either gender-stereotypical content (feminine vs. masculine) or gender-neutral content.</p>
<p>As our main prediction, we hypothesize differences in voice perception between Cisgender and TGNC participants. We expect the TGNC group to experience lower vocal congruence overall, with the lowest scores occurring in conditions that require focus on the externalized voice. Specifically, we expect TGNC participants to experience lower vocal congruence than cisgender participants during the Reading Aloud condition, which involves active speech motor control and simultaneous auditory-somatosensory feedback (including bone conduction). We also expect lower scores for TGNC participants in the Listening condition, which isolates the purely auditory-perceptual evaluation of the self-voice without motor intent. Conversely, we hypothesize that Silent Reading&#x02014;requiring an engagement of auditory imagery without an external feedback&#x02014;will elicit relatively higher congruence scores in the TGNC group, as it reflects a mental representation of vocal identity that is less mediated by the physical constraints or acoustic &#x0201C;mismatches&#x0201D; of externalized speech. A summary of the experimental conditions and their underlying processes is provided in <xref ref-type="table" rid="T1">Table 1</xref>.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Comparison of experimental conditions by cognitive process and primary feedback.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Condition</bold></th>
<th valign="top" align="left"><bold>Process</bold></th>
<th valign="top" align="left"><bold>Primary feedback</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Silent reading</td>
<td valign="top" align="left">Auditory imagery</td>
<td valign="top" align="left">None</td>
</tr>
<tr>
<td valign="top" align="left">Reading aloud</td>
<td valign="top" align="left">Speech production</td>
<td valign="top" align="left">Auditory (air &#x0002B; bone) &#x00026; somatosensory</td>
</tr>
<tr>
<td valign="top" align="left">Listening</td>
<td valign="top" align="left">Auditory perception</td>
<td valign="top" align="left">Auditory (air only)</td>
</tr></tbody>
</table>
</table-wrap>
<p>Second, to investigate how gendered linguistic primes influence vocal self-perception, we manipulate the semantic content of the texts, presenting excerpts that are gender-stereotypical (masculine or feminine) and gender-neutral. We predict that the semantic content of the texts will interact with participants&#x00027; gender identity, such that gendered primes will reduce vocal congruence for TGNC individuals by reinforcing stereotypical gender associations. Although a stricter test of the priming effect should have targeted specific gender identities within each group paired with the relevant linguistic stereotype, the limited sample size prevented us from examining this aspect in depth. Nonetheless, we still expected the gendered semantic content of texts would specifically impact TGNC participants&#x00027; vocal congruence perception because it might draw participants&#x00027; attention to the general gender conceptual dimension.</p>
<p>Prior research suggests that interoceptive sensibility may play an important role in voice perception (<xref ref-type="bibr" rid="B23">Crow et al., 2021</xref>; <xref ref-type="bibr" rid="B78">Orepic et al., 2022</xref>; <xref ref-type="bibr" rid="B102">Smeltzer et al., 2023</xref>). Building on this, and also considering the high prevalence of emotion regulation difficulties&#x02014;such as alexithymia&#x02014;among TGNC individuals (<xref ref-type="bibr" rid="B48">Kallitsounaki and Williams, 2023</xref>; <xref ref-type="bibr" rid="B63">Mazzoli et al., 2022</xref>; <xref ref-type="bibr" rid="B93">Reed et al., 2023</xref>), the present study examines a broader set of psychological factors that might influence the experience of vocal congruence across different gender identities. Specifically, we investigate the role of interoceptive sensibility in shaping vocal congruence, as well as the potential disruptive effect of alexithymia and emotion regulation difficulties. In addition we explore the relation between ontological beliefs about gender/sex and vocal incongruence. Finally, we also investigate whether experiences of gender-related discrimination in the TGNC sample influence vocal congruence perception.</p>
</sec>
<sec>
<label>1.6</label>
<title>Data availability</title>
<p>All the materials, data, scripts, and analyses of the study are available at the OSF repository: <ext-link ext-link-type="uri" xlink:href="https://osf.io/v2gtn/">https://osf.io/v2gtn/</ext-link>.</p>
</sec>
</sec>
<sec id="s2">
<label>2</label>
<title>Methods</title>
<sec>
<label>2.1</label>
<title>Participants</title>
<p>A total of 45 participants were recruited for the study. A sensitivity power analysis was conducted using G&#x0002A;Power software (<xref ref-type="bibr" rid="B31">Faul et al., 2007</xref>) to determine the minimum detectable effect size given the observed sample size. For the primary Condition &#x000D7; Group interaction, the study had 80% power to detect an effect size of <italic>f</italic> = 0.50 (&#x003B7;<sup>2</sup> =0.20) at &#x003B1; =0.05. <italic>Post-hoc</italic> approximations for between-group comparisons indicated adequate power to detect medium-to-large effects (<italic>d</italic> = 0.85, power = 87%). After excluding one participant due to failure to complete the questionnaires, the resulting sample size was <italic>N</italic> = 44 for all subsequent statistical analyses (<italic>M Age</italic> = 27.45; <italic>SD Age</italic> = 10.22; Age range = 18&#x02013;66). We recruited participants through the involvement of an LGBTQIA&#x0002B; rights local association and a hospital service dedicated to gender-affirmation processes. Trainer vocalists and individuals at advanced stages of the gender-affirmation process who had undergone voice training interventions were not eligible. This was ensured prior to participants&#x00027; enrollment in the study. We asked for the following demographic information: age, birth sex, gender identity, sexual orientation, educational level, birth country, and languages spoken from childhood. Participants&#x00027; demographic information is reported in <xref ref-type="table" rid="T2">Table 2</xref>. Most participants (57%, <italic>n</italic> = 25) were female at birth, whereas 41% (<italic>n</italic> = 18) were male at birth and one participant was intersex. Based on gender identity, we divided participants into two groups: TGNC participants (<italic>n</italic> = 22) and participants who identified as cisgender (<italic>n</italic> = 22). 
In the cisgender group, 55% of the participants identified as cisgender women (<italic>n</italic> = 12) and 45% as cisgender men (<italic>n</italic> = 10). In the TGNC group, 32% of the participants identified as transgender men (<italic>n</italic> = 7), 23% as transgender women (<italic>n</italic> = 5), and 23% as non-binary (<italic>n</italic> = 5). Four participants (18%) used the &#x0201C;other&#x0201D; response option and reported they identified as &#x0201C;<italic>genderfluid transgender man</italic>,&#x0201D; &#x0201C;<italic>non-binary transgender man</italic>,&#x0201D; &#x0201C;<italic>genderfluid</italic>,&#x0201D; &#x0201C;<italic>woman</italic>.&#x0201D; The two groups were comparable in terms of age, <italic>t</italic><sub>(36.33)</sub> = &#x02212;0.087, <italic>p</italic> = 0.930 (<italic>M</italic> cisgender = 27.59; <italic>SD</italic> = 8.04; <italic>M</italic> TGNC = 27.31; <italic>SD</italic> = 12.21).</p>
<table-wrap position="float" id="T2">
<label>Table 2</label>
<caption><p>Socio-demographic information of the sample (<italic>n</italic> = 44).</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Birth sex</bold></th>
<th valign="top" align="center"><bold><italic>n</italic> (%)</bold></th>
<th valign="top" align="left"><bold>Gender identity</bold></th>
<th valign="top" align="center"><bold><italic>n</italic> (%)</bold></th>
<th valign="top" align="left"><bold>Education</bold></th>
<th valign="top" align="center"><bold><italic>n</italic> (%)</bold></th>
<th valign="top" align="left"><bold>Birth Country</bold></th>
<th valign="top" align="center"><bold><italic>n</italic> (%)</bold></th>
<th valign="top" align="left"><bold>Italian proficiency</bold></th>
<th valign="top" align="center"><bold><italic>n</italic> (%)</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Female</td>
<td valign="top" align="center">25 (57%)</td>
<td valign="top" align="left">Cisgender woman</td>
<td valign="top" align="center">13 (30%)</td>
<td valign="top" align="left">Middle school</td>
<td valign="top" align="center">4 (9%)</td>
<td valign="top" align="left">Italy</td>
<td valign="top" align="center">43 (99%)</td>
<td valign="top" align="left">First language</td>
<td valign="top" align="center">44 (100%)</td>
</tr>
<tr>
<td valign="top" align="left">Male</td>
<td valign="top" align="center">18 (41%)</td>
<td valign="top" align="left">Cisgender man</td>
<td valign="top" align="center">10 (23%)</td>
<td valign="top" align="left">High school</td>
<td valign="top" align="center">19 (43%)</td>
<td valign="top" align="left">Romania</td>
<td valign="top" align="center">1 (1%)</td>
<td valign="top" align="left">Other languages spoken from childhood</td>
<td valign="top" align="center">3 (7%)</td>
</tr>
<tr>
<td valign="top" align="left">Intersex</td>
<td valign="top" align="center">1 (2%)</td>
<td valign="top" align="left">Non-binary</td>
<td valign="top" align="center">5 (11%)</td>
<td valign="top" align="left">Bachelor degree</td>
<td valign="top" align="center">4 (9%)</td>
<td/>
<td/>
<td/>
<td/>
</tr>
<tr>
<td/>
<td/>
<td valign="top" align="left">Transgender woman</td>
<td valign="top" align="center">5 (11%)</td>
<td valign="top" align="left">Master degree</td>
<td valign="top" align="center">11 (25%)</td>
<td/>
<td/>
<td/>
<td/>
</tr>
<tr>
<td/>
<td/>
<td valign="top" align="left">Transgender man</td>
<td valign="top" align="center">7 (16%)</td>
<td valign="top" align="left">PhD</td>
<td valign="top" align="center">6 (14%)</td>
<td/>
<td/>
<td/>
<td/>
</tr>
<tr>
<td/>
<td/>
<td valign="top" align="left">Other</td>
<td valign="top" align="center">4 (9%)</td>
<td/>
<td/>
<td/>
<td/>
<td/>
<td/>
</tr>
<tr>
<td/>
<td/>
<td/>
<td/>
<td/>
<td/>
<td/>
<td/>
<td/>
<td/>
</tr></tbody>
</table>
</table-wrap>
<p>In the cisgender group, one participant completed only middle school (5%), 36% (<italic>n</italic> = 8) of participants reported having a high school diploma, one participant obtained a bachelor degree (5%), 36% (<italic>n</italic> = 8) a master&#x00027;s degree, and 18% (<italic>n</italic> = 4) a PhD title. In the TGNC group, 14% (<italic>n</italic> = 3) completed middle school, 50% (<italic>n</italic> = 11) of participants reported having a high school diploma, 36% (<italic>n</italic> = 3) had a bachelor&#x00027;s degree, 14% (<italic>n</italic> = 3) had a master&#x00027;s degree, and 18% (<italic>n</italic> = 4) a PhD title (<xref ref-type="table" rid="T2">Table 2</xref>).</p>
</sec>
<sec>
<label>2.2</label>
<title>General procedure</title>
<p>The experimental procedure was divided into three parts, and participants completed it in two sessions. In the first session, participants completed the voice perception task to assess levels of vocal congruence. As an additional, separate part of the experimental procedure, participants also rated a set of words on multiple semantic dimensions. However, these results are not discussed here, as they are not central to the aims of this study. Finally, they gave demographic information. In the second session, participants completed questionnaires investigating interoceptive sensibility, emotion regulation, alexithymia, gender identity, and gender discrimination. Questionnaires were administered 2 days after the completion of the first session to avoid fatigue and carryover effects of the voice perception task. The experimental procedure was implemented with Qualtrics, using an on-line questionnaire divided into sections that participants filled in a fixed order. The study obtained ethical approval from the Ethics Committee of the Sapienza University of Rome (Ethical Approval 0000856).</p>
</sec>
<sec>
<label>2.3</label>
<title>Session 1: vocal congruence assessment</title>
<sec>
<label>2.3.1</label>
<title>Materials</title>
<p>Four texts were chosen based on the presence or absence of a clear gender stereotypic representation of feminine and masculine gender identities. We selected the two gender-neutral texts from &#x0201C;Come vivevano i Greci&#x0201D; (<xref ref-type="bibr" rid="B79">Paoli, 1957</xref>) and from &#x0201C;Un mare di silenzio&#x0201D; (<xref ref-type="bibr" rid="B92">Rava, 2012</xref>), and the two gendered texts from &#x0201C;L&#x00027;abbecedario degli Stereotipi di Genere&#x0201D; (<xref ref-type="bibr" rid="B89">Priulla and Sammartino, 2020</xref>). Importantly, the former excerpts were selected from educational materials aimed at lower secondary school students, characterized by accessible language and designed to assess reading comprehension, while the latter were taken from an Italian educational booklet specifically dealing with gender stereotypes. In the gendered texts, stereotypical descriptions of gender identity and socialization for both feminine and masculine gender identities were presented to the participants. For example, the female text stated: &#x0201C;<italic>Even before children are born, many parents tend to adopt different attitudes based on the child&#x00027;s sex. If they believe it will be a girl, she is often imagined as gentle, sensitive, and naturally inclined toward relationships and married life</italic>.&#x0201D; The male text stated: &#x0201C;<italic>Even before children are born, many parents tend to adopt different attitudes based on the expected sex of the child. If they believe it will be a boy, they often imagine him as athletic, success-driven, strong, and independent</italic>.&#x0201D; All the texts had the same length (see <xref ref-type="supplementary-material" rid="SM1">Supplementary materials</xref>).</p>
<list list-type="bullet">
<list-item><p><italic>Vocal Congruence Scale</italic> (<xref ref-type="bibr" rid="B23">Crow et al., 2021</xref>). We administered a self-report questionnaire (0 = strongly disagree; 4 = strongly agree) designed to measure vocal congruence. The questionnaire was back-translated, and the scoring was adapted for this study (1 = strongly disagree; 5 = strongly agree). The scale comprises 10 items assessing various facets of the alignment between the voice and the self, including voice ownership, functional control, voice-identity congruence, voice agency and control, satisfaction, awareness/rumination about one&#x00027;s voice, voice clarity, communicative effectiveness, emotional expression, and gender congruence (see <xref ref-type="table" rid="T3">Table 3</xref>). However, the original validation study did not report distinct subscales or a factor structure supporting the delineation of separate subdimensions. Therefore, we treated it as a unidimensional measure and calculated the total sum score of all items.</p></list-item>
<list-item><p><italic>Inner-Outer Voice Congruence Rating</italic>. To measure the effect of the type of text manipulation, after each text we administered a five-point Likert scale rating to test the congruency between the inner voice perceived during the Silent Reading and the voice heard during the Listening condition. For instructions about the rating scale, see <xref ref-type="table" rid="T4">Table 4</xref>.</p></list-item>
</list>
<table-wrap position="float" id="T3">
<label>Table 3</label>
<caption><p>Vocal congruence scale.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Italian translation</bold></th>
<th valign="top" align="left"><bold>Original English version (<xref ref-type="bibr" rid="B23">Crow et al., 2021</xref>)</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left"><italic>Nel compito di produzione che ho appena terminato&#x02026;</italic><break/> &#x02026;<italic>sembrava che la mia voce appartenesse a me</italic>.</td>
<td valign="top" align="left">In the speaking/listening task I just performed&#x02026; &#x02026;it seemed like my voice belonged to me.</td>
</tr>
<tr>
<td valign="top" align="left">&#x02026;<italic>sembrava che la mia voce funzionasse normalmente</italic>.</td>
<td valign="top" align="left">&#x02026; it seemed like my voice was functioning as it normally does.</td>
</tr>
<tr>
<td valign="top" align="left">&#x02026;<italic>sembrava che la mia voce riflettesse chi sono</italic>.</td>
<td valign="top" align="left">&#x02026; it seemed like my voice reflected who I am.</td>
</tr>
<tr>
<td valign="top" align="left">&#x02026;<italic>sembrava avessi il controllo della mia voce</italic>.</td>
<td valign="top" align="left">&#x02026; it seemed like I was in control of my voice.</td>
</tr>
<tr>
<td valign="top" align="left">&#x02026;<italic>sono soddisfatt<sup>&#x0002A;</sup> di come suonava la mia voce</italic>.</td>
<td valign="top" align="left">&#x02026; I am satisfied with how my voice sounded.</td>
</tr>
<tr>
<td valign="top" align="left">&#x02026;<italic>stavo pensando al modo in cui suonava la mia voce</italic>.</td>
<td valign="top" align="left">&#x02026; I was thinking about the way my voice sounded.</td>
</tr>
<tr>
<td valign="top" align="left">&#x02026;<italic>mi sento come se la mia voce suonasse chiara</italic>.</td>
<td valign="top" align="left">&#x02026; I feel as though my voice sounded clear.</td>
</tr>
<tr>
<td valign="top" align="left">&#x02026;<italic>mi sento come se stessi parlando in modo facilmente comprensibile</italic>.</td>
<td valign="top" align="left">&#x02026; I feel as though I was speaking in a way that I was easily understood.</td>
</tr>
<tr>
<td valign="top" align="left">&#x02026;<italic>mi sento come se la mia voce riflettesse accuratamente il mio stato d&#x00027;animo</italic>.</td>
<td valign="top" align="left">&#x02026; I feel as though my voice accurately reflected my mood.</td>
</tr>
<tr>
<td valign="top" align="left">&#x02026;<italic>mi sento come se la mia voce riflettesse il mio genere</italic>.</td>
<td valign="top" align="left">&#x02026; I feel as though my voice reflected my gender.</td>
</tr>
<tr>
<td valign="top" align="left"><italic>1, Fortemente in disaccordo; 2, In disaccordo; 3, Neutrale; 4, In accordo; 5, Fortemente in accordo</italic>.</td>
<td valign="top" align="center">0, Strongly disagree; 1, Disagree; 2, Neutral; 3, Agree; 4, Strongly agree.</td>
</tr></tbody>
</table>
<table-wrap-foot>
<p>The left column presents the Italian version of the scale, back-translated and adapted by the authors, taking care to ensure gender-inclusive language through the use of a gender-neutral desinence. <sup>&#x0002A;</sup>see e.g., <xref ref-type="bibr" rid="B70">Mirabella et al. (2024b)</xref>; the right column shows the original English version (<xref ref-type="bibr" rid="B23">Crow et al., 2021</xref>).</p>
</table-wrap-foot>
</table-wrap>
<table-wrap position="float" id="T4">
<label>Table 4</label>
<caption><p>Instructions for the inner-outer voice congruence rating.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Original Italian version</bold></th>
<th valign="top" align="left"><bold>English translation</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left"><italic>Quanto ti sembra che la tua voce come la percepisci internamente corrisponda alla voce registrata? (1 = Per Nulla; 5 = Completamente)</italic></td>
<td valign="top" align="left">How much does your voice as you perceive it internally match your recorded voice? (1 = not at all; 5 = completely)</td>
</tr></tbody>
</table>
</table-wrap>
</sec>
<sec>
<label>2.3.2</label>
<title>Procedure</title>
<p>Participants were seated in front of a computer for the duration of the session. All recordings were carried out in person in a quiet room. Voice data were recorded using the built-in microphone of an HP Spectre x360 laptop, which was used for all participants to maintain consistency across recordings. Before each session, an audio check was conducted to ensure clear signal quality without distortion or significant background noise. All recordings were played back on the same laptop using standardized volume settings for every participant.</p>
<p>Participants were presented with four texts (Gender-neutral text 1, Stereotypically Masculine text, Gender-neutral text 2, Stereotypically Feminine text) in a fixed order to avoid any carryover effects of gender manipulation. They had to complete a Silent Reading, a Reading Aloud and a Listening task for each text. The order of the three conditions was kept fixed to avoid carryover effects of both Reading Aloud and Listening conditions on the Silent Reading condition. In the Silent Reading condition, participants were instructed to read the text silently without time pressure. In the Reading Aloud condition, they were asked to read the exact text aloud and to record themselves while reading by using the online questionnaire&#x00027;s interface. In the Listening condition, participants were asked to listen to their recorded voice; they could stop listening at any moment. After each condition, participants were presented with an Italian version of the Vocal Congruence Scale (<xref ref-type="bibr" rid="B23">Crow et al., 2021</xref>; see <xref ref-type="table" rid="T3">Table 3</xref>). At the end of each text block, they were asked to rate the perceived congruence between the inner voice heard during the Silent Reading and the voice heard during the Listening condition (see <xref ref-type="table" rid="T4">Table 4</xref>). We added this explicit question to understand better the priming role of gender-stereotyped text in the experience of vocal congruence. After the experimental task, participants provided their socio-demographic information. Finally, the questionnaires were administered 2 days after.</p>
</sec>
</sec>
<sec>
<label>2.4</label>
<title>Session 2: questionnaires</title>
<sec>
<label>2.4.1</label>
<title>Materials and procedure</title>
<p>In the last session, held 2 days after the vocal congruence assessment, all the participants were asked to complete the following set of questionnaires via Qualtrics: (a) <italic>The Multidimensional Assessment of Interoceptive Awareness</italic> (<xref ref-type="bibr" rid="B66">Mehling et al., 2012</xref>) to investigate the potential influence of interoceptive sensibility on vocal congruence; (b) the Emotion Regulation Questionnaire (<xref ref-type="bibr" rid="B34">Gross and John, 2003</xref>), to assess participants&#x00027; typical cognitive strategies for managing emotions; (c) the Toronto Alexithymia Scale (<xref ref-type="bibr" rid="B4">Bagby et al., 1994</xref>), to control for individual differences in alexithymia; and (d) the Multi-Gender Identity Questionnaire (<xref ref-type="bibr" rid="B46">Joel et al., 2014</xref>) to explore the degree of binary vs. flexible gender identity among participants. Additionally, TGNC participants completed the Italian adaptation of the Gender Minority Stress and Resilience Measure (<xref ref-type="bibr" rid="B105">Testa et al., 2015</xref>; Italian: <xref ref-type="bibr" rid="B97">Scandurra et al., 2020</xref>) assessing gender-related discrimination, as well as the Italian versions of the Transsexual Voice Questionnaire for Male-to-Female Transsexuals (TVQ<sup>MtF</sup>, <xref ref-type="bibr" rid="B24">Dacakis et al., 2013</xref>) and Transsexual Voice Questionnaire for Female-to-Male Transsexuals (TVQ<sup>FtM</sup>, <xref ref-type="bibr" rid="B24">Dacakis et al., 2013</xref>; <xref ref-type="bibr" rid="B52">Kreukels et al., 2012</xref>) to further investigate voice-related functioning in transgender women and men, respectively.</p>
<p>While a broader set of questionnaires was administered, the analyses presented in this work focus on the four measures described below&#x02014;MAIA, TAS-20, MULTI-GIQ, and GMSR&#x02014;selected a priori for their relevance to the research questions.</p>
<list list-type="bullet">
<list-item><p><italic>The Multidimensional Assessment of Interoceptive Awareness</italic> (<xref ref-type="bibr" rid="B66">Mehling et al., 2012</xref>). We used this 32-items self-report questionnaire, which assesses interoceptive awareness across eight constructs: (i) <italic>Noticing</italic>: awareness of uncomfortable, comfortable, or neutral body sensations; (ii) <italic>Non-Distracting</italic>: avoiding distraction to cope with bodily discomfort; (iii) <italic>Not-Worrying</italic>: tendency not to experience emotional distress about bodily discomfort; (iv) <italic>Attention Regulation</italic>: ability to sustain and control attention on the body; (v) <italic>Emotional Awareness</italic>: internal process involving the ability to attribute specific physical sensations to physiological manifestations of emotions; (vi) <italic>Self-Regulation</italic>: ability to regulate distress by attention to body sensations; (vii) <italic>Body-Listening</italic>: tendency to actively attend to body signals for insights; (viii) <italic>Trust</italic>: experience of one&#x00027;s body as safe and trustworthy. Each construct is rated on a scale from 0 to 5, with higher scores indicating greater interoceptive awareness. Not-Distracting and Not-Worrying sub-scales have reverse scored items. For the purpose of this study, we used the Italian adaptation of the MAIA questionnaire (<xref ref-type="bibr" rid="B12">Cal&#x000EC; et al., 2015</xref>).</p></list-item>
<list-item><p><italic>The Toronto Alexithymia Scale</italic> (<xref ref-type="bibr" rid="B4">Bagby et al., 1994</xref>). We assessed alexithymia with the Italian version (<xref ref-type="bibr" rid="B10">Bressi et al., 1996</xref>) of the 20-item Toronto Alexithymia Scale (TAS-20; <xref ref-type="bibr" rid="B4">Bagby et al., 1994</xref>). The TAS-20 is a 5-point Likert scale comprising three subscales: (i) difficulty identifying feelings (DIF) measures an individual&#x00027;s ability to recognize and identify their own emotional states; (ii) difficulty describing feelings (DDF) assesses the ability to verbally express and describe their emotions; (iii) externally oriented thinking (EOT) gauges the tendency to focus attention externally rather than internally. While factor subscales have been used in previous studies, recent research favors a single total score (<xref ref-type="bibr" rid="B5">Bagby et al., 2020</xref>).</p></list-item>
<list-item><p><italic>The Multi-Gender Identity Questionnaire</italic> (<xref ref-type="bibr" rid="B46">Joel et al., 2014</xref>). The Multi-Gender Identity Questionnaire (Multi-GIQ) consists of 24 questions, some of which are gender-neutral and others that are specifically designed for women and men participants. Answers were marked over a five-point Likert scale ranging from &#x0201C;Never&#x0201D; (0) to &#x0201C;Always&#x0201D; (4). A &#x0201C;Not relevant&#x0201D; item is present where necessary (e.g., the question: &#x0201C;In the past 12 months, have you had the wish or desire to be a man?&#x0201D; is not relevant for men). The scale investigates the following nine dimensions: (i) Feeling as a woman (Q3, Q14); (ii) Feeling as a man (Q4, Q13); (iii) Feeling as both genders (Q15, Q16); (iv) Feeling as neither gender (Q17); (v) Satisfaction being the affirmed gender (Q2 and Q1 for men and women, respectively); (vi) Wishing to be the &#x0201C;other&#x0201D; gender (Q21 and Q20 for men and women, respectively); (vii) Dislike of the sexed body (Q23 and Q22 for male- and female-assigned participants, respectively); (viii) Wishing to have the body of the &#x0201C;other&#x0201D; sex (Q24). Questions Q11 and Q12, pertain to buying and wearing clothes of the &#x0201C;other&#x0201D; sex. For the purpose of this study, the questionnaire was back-translated in Italian.</p></list-item>
</list>
<p>In addition to these, we also presented the TGNC group with the Italian adaptation of the <italic>Gender Minority Stress and Resilience Measure</italic> (<xref ref-type="bibr" rid="B105">Testa et al., 2015</xref>; Italian: <xref ref-type="bibr" rid="B97">Scandurra et al., 2020</xref>). The GMSR scale is composed of 58 Likert-scaled items and measures distal stressors (discrimination, rejection, victimization, and non-affirmation), proximal stressors (internalized transphobia, negative expectations, and non-disclosure), and resilience factors (pride and community connectedness) in TGNC people. The scale is composed of nine factors: Gender-Related Discrimination (five items), Gender-Related Rejection (six items), Gender-Related Victimization (six items), Non-affirmation of Gender Identity (six items), Internalized Transphobia (eight items), Pride (eight items), Negative Expectations for Future Events (nine items), Non-disclosure (five items), and Community Connectedness (five items).</p>
</sec>
</sec>
</sec>
<sec id="s3">
<label>3</label>
<title>Data analysis</title>
<p>Data preprocessing, analysis, and visualization were conducted using R (<xref ref-type="bibr" rid="B91">R Core Team, 2023</xref>) within the RStudio environment (<xref ref-type="bibr" rid="B87">Posit Team, 2025</xref>). Data preprocessing was performed using the &#x0201C;dplyr&#x0201D; package (<xref ref-type="bibr" rid="B116">Wickham et al., 2023</xref>), and data distributions were visualized using the &#x0201C;ggplot2&#x0201D; package (<xref ref-type="bibr" rid="B115">Wickham, 2016</xref>).</p>
<sec>
<label>3.1</label>
<title>Vocal congruence task</title>
<p>The distribution of VCS total scores was assessed using visual inspection of histograms and Q&#x02013;Q plots alongside formal normality tests. The distribution appeared approximately unimodal and bell-shaped, with observations primarily concentrated in the mid-range. While the Shapiro-Wilk test (via &#x0201C;<italic>stats</italic>&#x0201D; R&#x00027;s Package) indicated a statistically significant deviation from normality (<italic>W</italic> = 0.9746, <italic>p</italic> &#x0003C; 0.001), visual diagnostics revealed that this deviation was primarily confined to the extreme tails (please refer to <xref ref-type="supplementary-material" rid="SM1">Supplementary materials</xref>), reflecting the bounded nature of the 10&#x02013;50 sum score.</p>
<p>Given that linear mixed-effects models are robust to moderate departures from normality (<xref ref-type="bibr" rid="B98">Schielzeth et al., 2020</xref>), VCS total scores were analyzed using linear mixed-effects models (via the &#x0201C;<italic>lme4</italic>&#x0201D; package in R; <xref ref-type="bibr" rid="B7">Bates et al., 2015</xref>).</p>
<p>As a preliminary step, we conducted within-participant comparisons to assess whether the two gender-neutral texts differed in their effects on vocal congruence, and whether the two gendered texts differed from each other. Given the non-normal distribution of vocal congruence sum scores, we used Wilcoxon rank-sum tests with continuity correction. Results indicated no significant difference between the two gender-neutral texts (<italic>W</italic> = 8,529.5, <italic>p</italic> = 0.769), nor between the two gendered texts (<italic>W</italic> = 8,671.5, <italic>p</italic> = 0.948).</p>
<p>Because the primary level of analysis in the present study contrasts neutral vs. masculine vs. feminine texts, and only the neutral condition included two empirically equivalent texts, we averaged vocal congruence scores across the two neutral texts within participants. This approach preserves all available data while ensuring that the neutral condition contributes to a single, stable estimate per participant, comparable to the masculine and feminine conditions.</p>
<sec>
<label>3.1.1</label>
<title>Model structure</title>
<p>Vocal Congruence Scale sum scores for each Type of Text and for each Condition were used for the main model as the dependent variable. Group (TGNC vs. cisgender), Type of Text (Averaged Gender-neutral vs. Stereotypically Feminine vs. Stereotypically Masculine), Condition (Silent Reading vs. Reading Aloud vs. Listening), and their three way interaction were inserted in the model as fixed factors. To account for inter-individual variability in baseline vocal congruence, we included participants as a random intercept (1| Participant). <italic>Post-hoc</italic> contrasts were carried out with the &#x0201C;<italic>emmeans</italic>&#x0201D; R&#x00027;s package (<xref ref-type="bibr" rid="B54">Lenth, 2024</xref>) using Tukey&#x00027;s adjustment for multiple comparisons.</p>
<p>Model selection was performed by comparing an additive model (Group &#x0002B; Condition &#x0002B; Text) against a full factorial model including interactions (Group <sup>&#x0002A;</sup> Condition <sup>&#x0002A;</sup> Text). A Likelihood Ratio Test indicated that the inclusion of interaction terms significantly improved model fit, <inline-formula><mml:math id="M1"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>12</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 51.38, <italic>p</italic> &#x0003C; 0.001, <italic>AIC no interaction</italic> = 2,531.6, <italic>AIC interaction</italic> = 2,504.2, suggesting that the added complexity was justified by the increased explanatory power.</p>
<p>Model assumptions were verified using the &#x0201C;<italic>DHARMa</italic>&#x0201D; R package (<xref ref-type="bibr" rid="B38">Hartig, 2024</xref>) to assess scaled residuals, which revealed no significant evidence of overdispersion, outliers, or deviations from the expected distribution (please refer to <xref ref-type="supplementary-material" rid="SM1">Supplementary materials</xref>).</p>
<p>To control for potential order effects resulting from the fixed sequence of conditions and texts, we included global trial order as a covariate in a supplementary linear mixed-effects model, in interaction with the Group. The inclusion of trial order did not significantly improve model fit, <inline-formula><mml:math id="M2"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 1.856, <italic>p</italic> = 0.395. Results revealed a non-significant negative main effect of trial order (&#x003B2; = &#x02212;0.13, <italic>SE</italic> = 0.09, <italic>p</italic> = 0.175). Also the interaction between trial order and Group was non-significant (&#x003B2; = 0.14, <italic>SE</italic> = 0.13, <italic>p</italic> = 0.309). This suggests that the fixed order did not differentially impact the groups or bias the primary comparisons of interest.</p>
</sec>
</sec>
<sec>
<label>3.2</label>
<title>Inner-outer voice congruence rating</title>
<p>Prior to any statistical analysis, we examined distributional assumptions. Shapiro-Wilk tests revealed that the distribution significantly deviated from normality (<italic>W</italic> = 0.884, <italic>p</italic> &#x0003C; 0.001). Visual inspection of histograms confirmed a non-normal distribution with bimodal tendencies (peaks at ratings 1&#x02013;2 and 3&#x02013;4).</p>
<p>To further investigate vocal congruence as a function of Type of Text, we initially fitted a cumulative link mixed model with the &#x0201C;<italic>ordinal</italic>&#x0201D; R&#x00027;s package (<xref ref-type="bibr" rid="B19">Christensen, 2022</xref>) featuring scores of the Inner-Outer Voice Congruence Rating as dependent variable, Group (TGNC vs. cisgender), Type of Text (Gender-neutral vs. Stereotypically Feminine vs. Stereotypically Masculine), and their interaction as fixed factors, and participants as random intercepts. However, the model failed to converge due to quasi-separation issues in the data.</p>
<p>Therefore, we relied on non-parametric testing. To compare ratings between TGNC and cisgender participants across text types, we conducted Wilcoxon rank-sum tests with Bonferroni correction via the &#x0201C;<italic>rstatix</italic>&#x0201D; package (<xref ref-type="bibr" rid="B49">Kassambara, 2023</xref>).</p>
</sec>
<sec>
<label>3.3</label>
<title>Questionnaires</title>
<p>To account for the potential difference in the questionnaires (MAIA, TAS-20, MULTI-GIQ, GMSR) scores between the two groups, we employed one-way ANOVAs. We further examined the role of each variable targeted by questionnaires (interoceptive sensibility, alexithymia, gender identity, gender-related discrimination) on vocal congruence. Given the exploratory nature of these analyses across multiple psychological domains, we ran separate linear mixed models for each subscale, using the same structure and methodology as the main model. To address the risk of Type I errors associated with multiple testing, we applied the Benjamini-Hochberg False Discovery Rate (FDR) correction to all <italic>p</italic>-values across the total set of models. <italic>Post-hoc</italic> contrasts were conducted with the &#x0201C;<italic>emtrends</italic>&#x0201D; function from the &#x0201C;<italic>emmeans</italic>&#x0201D; R package.</p>
<p>We investigated whether individual differences in the general ability to attend to and connect with bodily sensations&#x02014;measured by the MAIA scale&#x02014;and the ability to identify and express feelings&#x02014;assessed via the TAS-20 scale&#x02014;may moderate the effect of the experimental manipulation. Furthermore, we explored how aspects related to gender identity representation, as captured by the MULTI-GIQ, might affect judgments of vocal congruence. Finally, we examined whether experiences of gender discrimination, measured by the GMSR questionnaire, might specifically impair vocal congruence in TGNC participants. In the analyses we will only focus on the impact of each covariate on vocal congruence and its interaction with the different groups as a function of experimental conditions. For the full set of additional models please refer to the <xref ref-type="supplementary-material" rid="SM1">Supplementary materials</xref>.</p>
<p>As in the previous vocal congruence model, for each model featuring covariates we compared the full model with the three-way interaction to a reduced model with only the two-way interaction between Condition and Group. Since the full model did not significantly improve the fit, <inline-formula><mml:math id="M3"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>12</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 7.92, <italic>p</italic> = 0.791, and had higher AIC and BIC values (AIC = 2,504 vs. 2,488; BIC = 2,583 vs. 2,519), we retained the two-way interaction model for each covariate.</p>
</sec>
</sec>
<sec sec-type="results" id="s4">
<label>4</label>
<title>Results</title>
<sec>
<label>4.1</label>
<title>Vocal congruence task</title>
<p>Density distributions of average VCS scores across Conditions and Groups suggest distinct performance profiles. Cisgender participants consistently exhibit slightly higher peak densities and appear to have somewhat more concentrated distributions around higher average scores. The TGNC group demonstrates more varied distribution patterns across modalities, with the Listening condition showing the most distinct separation between the groups (see <xref ref-type="fig" rid="F1">Figure 1</xref>).</p>
<fig position="float" id="F1">
<label>Figure 1</label>
<caption><p>Density distributions of VCS average scores across conditions and groups. This figure presents density distributions of average scores across the three conditions (silent reading, reading aloud, and listening) and groups (TGNC vs. Cisgender).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcogn-05-1638501-g0001.tif">
<alt-text content-type="machine-generated">Grouped histogram with density curves compares average scores for TGNC (green) and Cisgender (purple) groups across three conditions: Silent Reading, Reading Aloud, and Listening. Scores range from 10 to 50 on the x-axis, with density on the y-axis.</alt-text>
</graphic>
</fig>
<p>Our first research question investigates whether linguistic gender stereotypes affect the perception of vocal congruence in different conditions as a function of different gender identity experiences.</p>
<p>Looking at intra-class correlations, approximately 51.5% of the total variance in VCS Total scores was attributable to stable differences between participants.</p>
<p>The three-way interaction among Condition, Group, and Type of Text was not significant, <inline-formula><mml:math id="M4"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>4</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 0.34, <italic>p</italic> = 0.986. There was, however, a significant two-way interaction between Condition and Group, <inline-formula><mml:math id="M5"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 47.24, <italic>p</italic> &#x0003C; 0.001. Pairwise comparisons show that the two groups differed in the Silent Reading condition, &#x003B2; = &#x02212;3.91, <italic>t</italic> = &#x02212;2.250, <italic>SE</italic> = 1.74, <italic>p</italic> = 0.028, with TGNC participants reporting lower vocal congruence compared to cisgender participants (<italic>EMM TGNC</italic> = 34.9, <italic>SE</italic> = 1.22, UCL = 32.5, LCL = 37.4; <italic>EMM Cisgender</italic> = 38.8, <italic>SE</italic> = 1.23, LCL = 36.4, UCL = 41.3). Likewise, the two groups scores also differed in the Reading Aloud condition, &#x003B2; = &#x02013;10.55, <italic>t</italic> = &#x02212;6.070, <italic>SE</italic> = 1.74, <italic>p</italic> &#x0003C; 0.001, with TGNC participants reporting significantly lower vocal congruence compared to cisgender participants (<italic>EMM TGNC</italic> = 28.7, <italic>SE</italic> = 1.23, LCL = 26.2, UCL = 31.1; <italic>EMM Cisgender</italic> = 39.2, <italic>SE</italic> = 1.23, LCL = 36.8, UCL = 41.7). 
Finally, the two groups differed in the Listening condition too, &#x003B2; = &#x02013;11.48, <italic>t</italic> = &#x02212;6.606, <italic>SE</italic> = 1.74, <italic>p</italic> &#x0003C; 0.001, with TGNC participants reporting significantly lower vocal congruence compared to cisgender participants (<italic>EMM TGNC</italic> = 27.1, <italic>SE</italic> = 1.23, LCL = 24.6, UCL = 29.5; <italic>EMM Cisgender</italic> = 38.6, <italic>SE</italic> = 1.23, LCL = 36.1, UCL = 41.0). Finally, TGNC participants gave higher scores of vocal congruence in the Silent Reading compared to the Reading Aloud, &#x003B2; = 6.22, <italic>t</italic> = 7.334, <italic>SE</italic> = 0.84, <italic>p</italic> &#x0003C; 0.001, and Listening conditions, &#x003B2; = 1.61, <italic>t</italic> = 9.234, <italic>SE</italic> = 0.84, <italic>p</italic> &#x0003C; 0.001, while there was no difference between the Reading Aloud condition and the Listening condition, &#x003B2; = 1.61, <italic>SE</italic> = 0.84, <italic>p</italic> = 0.141 (<italic>EMM Silent Reading</italic> = 33.9, <italic>SE</italic> = 1.23, LCL = 32.5, UCL = 34.7; <italic>EMM Reading Aloud</italic> = 28.7, <italic>SE</italic> = 1.23, LCL = 26.2, UCL = 31.1; <italic>EMM Listening</italic> = 27.1, <italic>SE</italic> = 1.23, LCL = 24.6, UCL = 29.5). No significant difference emerged across conditions for the cisgender group, all <italic>p</italic><sub>s</sub> &#x02265; 0.701 (see <xref ref-type="fig" rid="F2">Figure 2</xref>).</p>
<fig position="float" id="F2">
<label>Figure 2</label>
<caption><p>Interaction plots of VCS total scores by condition and group. Distribution of predicted vocal congruence scale (VCS) total scores across the three conditions (silent reading: blue; reading aloud: green; listening: pink) for TGNC and Cisgender participants. Thick black dots represent estimated marginal means and their standard errors (vertical thick black line), and dots represent raw data. <sup>&#x0002A;</sup> indicates <italic>p</italic> &#x0003C; .05; <sup>&#x0002A;&#x0002A;</sup> indicates <italic>p</italic> &#x0003C; .01; <sup>&#x0002A;&#x0002A;&#x0002A;</sup> indicates <italic>p</italic> &#x0003C; .001.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcogn-05-1638501-g0002.tif">
<alt-text content-type="machine-generated">Box plot comparing VCS total scores for TGNC and cisgender groups across silent reading, reading aloud, and listening conditions. Cisgender group shows higher scores across all conditions. Asterisks denote significant differences. Individual data points are overlaid.</alt-text>
</graphic>
</fig>
<p>The model additionally indicated significant main effects of Condition, <inline-formula><mml:math id="M6"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 48.52, <italic>p</italic> &#x0003C; 0.001, and Group, <inline-formula><mml:math id="M7"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 29.44, <italic>p</italic> &#x0003C; 0.001, while Text was not significant, <inline-formula><mml:math id="M8"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 2.08, <italic>p</italic> = 0.352. No other main effects or interactions reached significance (all <italic>ps</italic> &#x0003E; 0.218; please refer to <xref ref-type="supplementary-material" rid="SM1">Supplementary materials</xref> for report of model results).</p>
</sec>
<sec>
<label>4.2</label>
<title>Inner-outer congruence rating</title>
<p>Density distributions reveal that TGNC participants display greater variability compared to Cisgender participants, and reported consistently lower alignment between internal and external voice perception (<xref ref-type="fig" rid="F3">Figure 3</xref>).</p>
<fig position="float" id="F3">
<label>Figure 3</label>
<caption><p>Density distributions for the inner-outer voice congruence rating across type of texts and groups. Density distributions of participants&#x00027; responses to &#x0201C;How closely does your voice as you perceive it internally correspond to the recorded voice,&#x0201D; averaged for texts types (gender-neutral, stereotypically feminine, stereotypically masculine) and groups (TGNC vs. Cisgender). The scale was administered on five-point Likert scales ranging from 1 = &#x0201C;<italic>not at all</italic>&#x0201D; to 5 = &#x0201C;<italic>completely</italic>&#x0201D;.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcogn-05-1638501-g0003.tif">
<alt-text content-type="machine-generated">Grouped bar and density plots display Inner-Outer Voice Congruence Rating distributions for TGNC and cisgender participants across Neuter 1, Masculine, Neuter 2, and Feminine voice types, with TGNC data in green and cisgender in purple.</alt-text>
</graphic>
</fig>
<p>Wilcoxon rank-sum tests revealed significant differences between TGNC and cisgender participants across all text types, all <italic>ps</italic> &#x02264; 0.002 (Bonferroni adjusted, see <xref ref-type="table" rid="T5">Table 5</xref>).</p>
<table-wrap position="float" id="T5">
<label>Table 5</label>
<caption><p>Descriptive statistics (means and standard deviations) of inner-outer voice congruence rating scores for Cisgender and TGNC participants along with Wilcoxon Rank-Sum Tests results comparing their judgments.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Text type</bold></th>
<th valign="top" align="center"><bold><italic>M</italic> Cisgender</bold></th>
<th valign="top" align="center"><bold><italic>SD</italic> Cisgender</bold></th>
<th valign="top" align="center"><bold><italic>M</italic> TGNC</bold></th>
<th valign="top" align="center"><bold><italic>SD</italic> TGNC</bold></th>
<th valign="top" align="center"><bold><italic>W</italic></bold></th>
<th valign="top" align="center"><bold><italic>P</italic> adj</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Neuter 1</td>
<td valign="top" align="center">3.04</td>
<td valign="top" align="center">0.99</td>
<td valign="top" align="center">1.86</td>
<td valign="top" align="center">0.99</td>
<td valign="top" align="center">97.50</td>
<td valign="top" align="center">0.002</td>
</tr>
<tr>
<td valign="top" align="left">Masculine</td>
<td valign="top" align="center">3.40</td>
<td valign="top" align="center">0.90</td>
<td valign="top" align="center">1.63</td>
<td valign="top" align="center">0.95</td>
<td valign="top" align="center">50.50</td>
<td valign="top" align="center">&#x0003C;0.001</td>
</tr>
<tr>
<td valign="top" align="left">Neuter 2</td>
<td valign="top" align="center">3.22</td>
<td valign="top" align="center">1.02</td>
<td valign="top" align="center">1.72</td>
<td valign="top" align="center">1.07</td>
<td valign="top" align="center">74.50</td>
<td valign="top" align="center">&#x0003C;0.001</td>
</tr>
<tr>
<td valign="top" align="left">Feminine</td>
<td valign="top" align="center">3.40</td>
<td valign="top" align="center">1.05</td>
<td valign="top" align="center">1.77</td>
<td valign="top" align="center">0.97</td>
<td valign="top" align="center">68.00</td>
<td valign="top" align="center">&#x0003C;0.001</td>
</tr></tbody>
</table>
</table-wrap>
<p>Descriptive statistics averaging the two groups&#x00027; ratings across conditions further evidenced the lower perceived congruence between the TGNC group&#x00027;s inner vs. outer voice (<italic>M</italic> TGNC = 1.75; <italic>SD</italic> = 0.98; <italic>M</italic> Cisgender = 3.27; <italic>SD</italic> = 0.99).</p>
<p>Overall, the results tackling vocal congruence as emerging from the analyses of both the Vocal Congruence Scale and the Inner-Outer Congruence Rating suggest that TGNC participants experience vocal congruence in an idiosyncratic and subjective manner. Specifically, in the VCS assessing more general aspects connected to voice representation (e.g., control, adherence to identity, satisfaction, and awareness) the two groups differed in all conditions (i.e., Silent Reading, Reading Aloud, and Listening)&#x02014;showing a decreasing perceived congruence for the TGNC group as a function of whether the condition required an auditory feedback or not. Hence, we found the starkest difference between the two groups in the Listening condition. The internal representation of their speech from the early stage (Silent Reading) to the latest stages of articulatory and/or auditory components (respectively, the Reading Aloud and Listening condition) would be perceived as not reflecting features of the bodily self. Moreover, the rating task investigating the perceived mismatch between one&#x00027;s own inner vs. outer voice highlighted that the two groups systematically diverged in their assessments&#x02014;with the TGNC group once again reporting lower congruence compared to the cisgender group, regardless of the experimental condition.</p>
</sec>
<sec>
<label>4.3</label>
<title>Questionnaires: relationship with vocal congruence</title>
<p>The previous analyses showed that vocal congruence varies across gender identities, with transgender and gender non-conforming (TGNC) participants systematically reporting lower congruence compared to cisgender participants. To better understand this phenomenon, we fitted separate models investigating the impact of specific aspects related to interoceptive and metacognitive-emotional components as well as aspects related to gender identity on vocal congruence (see Data Analysis). For space reasons, below we only report results from models in which the interaction between specific covariates, experimental condition, and group&#x02014;where relevant&#x02014;is significant. For the complete report of results please refer to <xref ref-type="supplementary-material" rid="SM1">Supplementary materials</xref>.</p>
<sec>
<label>4.3.1</label>
<title>Interoception</title>
<sec>
<label>4.3.1.1</label>
<title>Interoceptive sensibility</title>
<p>The two groups differed on two out of the 10 subscales of the MAIA questionnaire (see <xref ref-type="supplementary-material" rid="SM1">Supplementary materials</xref> for all the results). Specifically, TGNC participants gave lower scores of Self-Regulation compared to cisgender participants, <italic>F</italic><sub>(1)</sub> = 10.1, <italic>p</italic> = 0.002, <italic>M</italic> TGNC = 2.29; <italic>SD</italic> = 1.25; <italic>M</italic> cisgender = 3.34; <italic>SD</italic> = 0.90. In addition, TGNC participants also gave lower scores of Trust compared to cisgender participants, <italic>F</italic><sub>(1)</sub> = 35.8, <italic>p</italic> &#x0003C; 0.001, <italic>M</italic> TGNC = 2.68; <italic>SD</italic> = 1.22; <italic>M</italic> Cisgender = 4.56; <italic>SD</italic> = 0.83.</p>
<sec>
<label>4.3.1.1.1</label>
<title>Emotional awareness</title>
<p>We observed a significant three-way interaction between Group, Condition, and Emotional Awareness, <inline-formula><mml:math id="M9"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 21.34, <italic>p</italic> adj &#x0003C; 0.001. Estimated trends showed that for the TGNC group higher VCS scores in the Silent Reading condition are associated with significantly higher scores of Emotional Awareness (&#x003B2; <italic>TGNC</italic> = 2.84, <italic>SE</italic> = 1.06, <italic>t</italic> = 2.687, LCL = 0.71, UCL = 4.97, <italic>p</italic> = 0.009). No other significant comparisons reached significance, all <italic>ps</italic> &#x02265; 0.591.</p>
<p>The results also indicated a significant two-way interaction between Condition and Emotional Awareness, <inline-formula><mml:math id="M10"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 18.81, <italic>p</italic> adj &#x0003C; 0.001, as well as a significant two-way interaction between Condition and Group, <inline-formula><mml:math id="M11"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 62.62, <italic>p</italic> adj &#x0003C; 0.001. Finally, the model yielded to significant main effects for Condition, <inline-formula><mml:math id="M12"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 69.88, <italic>p</italic> adj &#x0003C; 0.001, and Group, <inline-formula><mml:math id="M13"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 27.81, <italic>p</italic> adj &#x0003C; 0.001. 
No main effect of Emotional Awareness was detected, <inline-formula><mml:math id="M14"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 0.52, <italic>p</italic> adj = 0.546, all other <italic>ps</italic> adj &#x0003E; 0.759.</p>
</sec>
<sec>
<label>4.3.1.1.2</label>
<title>Not worrying</title>
<p>The model yielded significant three-way interaction between Condition, Group, and Not-Worrying, <inline-formula><mml:math id="M15"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 8.41, <italic>p</italic> adj = 0.026. Estimated linear trends showed that for TGNC Group higher scores on the VCS scores in the Reading Aloud Condition are associated with significantly higher scores of Not Worrying (&#x003B2; <italic>TGNC</italic> = 3.05, <italic>SE</italic> = 0.97, <italic>t</italic> = 3.142, LCL = 1.10, UCL = 5.00, <italic>p</italic> = 0.002). In addition, for TGNC Group higher scores on the VCS in the Listening Condition are associated with significantly higher scores of Not Worrying (&#x003B2; <italic>TGNC</italic> = 3.58, <italic>SE</italic> = 0.92, <italic>t</italic> = 3.684, LCL = 1.63, UCL = 5.53, <italic>p</italic> &#x0003C; 0.001). No other significant comparison emerged, all <italic>ps</italic> &#x02265; 0.191.</p>
<p>The results also indicated a significant two-way interaction between Condition and Group, <inline-formula><mml:math id="M16"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 57.44, <italic>p</italic> adj &#x0003C; 0.001, as well as significant main effects of Group, <inline-formula><mml:math id="M17"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 29.56, <italic>p</italic> adj &#x0003C; 0.001, and Condition, <inline-formula><mml:math id="M18"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 61.64, <italic>p</italic> adj &#x0003C; 0.001. No other two-way interactions or main effects reached significance, all <italic>ps</italic> adj &#x02265; 0.060.</p>
</sec>
<sec>
<label>4.3.1.1.3</label>
<title>Trusting</title>
<p>We observed a significant three-way interaction between Group, Condition, and Trusting, <inline-formula><mml:math id="M19"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 19.91, <italic>p</italic> adj &#x0003C; 0.001. Estimated linear trends showed that for the TGNC Group higher VCS scores in the Reading Aloud Condition are associated with significantly high scores of Trust (&#x003B2; <italic>TGNC</italic> = 3.14, <italic>SE</italic> = 0.99, <italic>t</italic> = 3.181, LCL = 1.16, UCL = 5.13, <italic>p</italic> = 0.002). Moreover, for the TGNC Group higher VCS scores in the Listening Condition are associated with significantly high scores of Trust (&#x003B2; <italic>TGNC</italic> = 3.19, <italic>SE</italic> = 0.99, <italic>t</italic> = 3.222, LCL = 1.20, UCL = 5.17, <italic>p</italic> = 0.002). No other comparison reached significance, all <italic>ps</italic> &#x0003E; 0.216.</p>
<p>The results also indicated a significant two-way interaction between Condition and Trusting, <inline-formula><mml:math id="M20"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 29.67, <italic>p</italic> adj &#x0003C; 0.001. For this model, the two-way interaction between Condition and Group was not significant, <inline-formula><mml:math id="M21"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 5.06, <italic>p</italic> adj = 0.124. We observed significant main effects for Condition, <inline-formula><mml:math id="M22"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 11.18, <italic>p</italic> adj = 0.007, and Group, <inline-formula><mml:math id="M23"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 9.74, <italic>p</italic> adj = 0.003. 
The main effect of Trusting was not statistically significant, <inline-formula><mml:math id="M24"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 1.07, <italic>p</italic> adj = 0.368.</p>
<p>Interoceptive sensibility and its relation with vocal congruence seem to be mostly intertwined with sensory and auditory feedback. Indeed, we found that, only for TGNC participants, the capability of not worrying about physical discomfort and of trusting their own body is associated with higher perceived vocal congruence. Finally, we also found that being able to associate physical sensations to one&#x00027;s own specific feelings or emotions enhanced vocal congruence in the Silent Reading condition for TGNC participants but not for cisgender participants (see <xref ref-type="fig" rid="F4">Figure 4</xref>).</p>
<fig position="float" id="F4">
<label>Figure 4</label>
<caption><p>Predicted total VCS scores by emotional awareness, not worrying, and trusting quartiles, condition, and group. Predicted values from a linear model including emotional awareness, not worrying, and trusting (mean-centered), condition (silent reading, reading aloud, listening), and group (TGNC vs. Cisgender), as well as their interactions. Error bars represent 95% confidence intervals. Covariates were divided into quartiles for visualization purposes.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcogn-05-1638501-g0004.tif">
<alt-text content-type="machine-generated">Three side-by-side scatter plots with error bars display Predicted Total VCS versus mean quartiles for Emotional Awareness, Not Worrying, and Trusting across TGNC and Cisgender groups. Data is differentiated by condition: Silent Reading (blue), Reading Aloud (green), and Listening (pink). Each plot compares trends and variability in VCS scores by group and quartile for the three conditions.</alt-text>
</graphic>
</fig>
</sec>
</sec>
</sec>
<sec>
<label>4.3.2</label>
<title>Alexithymia</title>
<p>We investigated differences between the two groups in terms of alexithymia. First, we considered the global scores of the TAS-20 questionnaire. There was a difference between the two groups, <italic>F</italic><sub>(1)</sub> = 10.87, <italic>p</italic> = 0.002, such that the mean outcome for the TGNC group (<italic>M</italic> = 56.40; <italic>SD</italic> = 11.60) was significantly higher than that for the cisgender group (<italic>M</italic> = 45.18; <italic>SD</italic> = 10.97) by an average of 11.23 units. So, overall, TGNC participants reported higher levels of alexithymia than cisgender participants. Looking into the subscales of the TAS-20 questionnaire, we found that the groups differed in terms of Difficulty in Identifying Feelings, <italic>F</italic><sub>(1)</sub> = 19.61, <italic>p</italic> &#x0003C; 0.001, Difficulty in Describing Feelings (DDF), <italic>F</italic><sub>(1)</sub> = 5.469, <italic>p</italic> = 0.024, but not in terms of Externally-Oriented Thinking, <italic>F</italic><sub>(1)</sub> = 0.152, <italic>p</italic> = 0.698.</p>
<p>To control for the effect of alexithymia on vocal congruence we fitted four generalized linear mixed models, inserting Total TAS-20 scores and the sub-dimensions of the questionnaire as covariates in interaction with the predictors Condition and Group. The complete results are available in the <xref ref-type="supplementary-material" rid="SM1">Supplementary materials</xref>.</p>
<sec>
<label>4.3.2.1</label>
<title>Total TAS-20</title>
<p>The three-way interaction between Condition, Group, and Total Tas-20 scores did not reach significance, <inline-formula><mml:math id="M25"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 0.43, <italic>p</italic> = 0.805. However, the model yielded instead a significant two-way interaction between Condition and Group, <inline-formula><mml:math id="M26"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 38.20, <italic>p</italic> &#x0003C; 0.001, and between Group and Total Tas-20 scores, <inline-formula><mml:math id="M27"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 6.99, <italic>p</italic> = 0.008. Estimated linear trends show that for the TGNC Group lower scores on the VCS are associated with significantly higher Total Tas-20 scores (&#x003B2; <italic>TGNC</italic> = &#x02212;0.270, <italic>SE</italic> = 0.09, <italic>t</italic> = &#x02212;2.946, LCL = &#x02212;0.45, UCL = &#x02212;0.08, <italic>p</italic> = 0.005). No other comparisons reached significance, all <italic>ps</italic> &#x0003E; 0.398 (see <xref ref-type="fig" rid="F5">Figure 5</xref>).</p>
<fig position="float" id="F5">
<label>Figure 5</label>
<caption><p>Predicted total VCS scores by total Tas-20 scores quartiles, condition, and group. Predicted values from a linear model including total Tas-20 (mean-centered), condition (silent reading, reading aloud, listening), and group (TGNC vs. Cisgender), as well as their interactions. Error bars represent 95% confidence intervals. Total Tas-20 was divided into quartiles for visualization purposes.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fcogn-05-1638501-g0005.tif">
<alt-text content-type="machine-generated">Scatter plot showing predicted total VCS on the y-axis and total Tas-20 score (mean quartiles) on the x-axis, comparing TGNC and cisgender groups with error bars represented by dots in green and blue.</alt-text>
</graphic>
</fig>
<p>The results also indicated a significant main effect of Group, <inline-formula><mml:math id="M28"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 20.43, <italic>p</italic> &#x0003C; 0.001, and Condition, <inline-formula><mml:math id="M29"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 58.69, <italic>p</italic> &#x0003C; 0.001. The effect of Total Alexithymia was not significant, <inline-formula><mml:math id="M30"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 1.97, <italic>p</italic> = 0.160.</p>
</sec>
<sec>
<label>4.3.2.2</label>
<title>Externally oriented thinking</title>
<p>The results indicated a three-way interaction between Group, Condition, and EOT, <inline-formula><mml:math id="M31"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 7.73, <italic>p</italic> adj = 0.034. Estimated trends show that for the TGNC Group lower VCS scores in the Silent Reading Condition are associated with significantly higher scores of Externally-Oriented Thinking (&#x003B2; <italic>TGNC</italic> = &#x02212;0.64, <italic>SE</italic> = 0.30, <italic>t</italic> = &#x02212;2.126, LCL = &#x02212;1.24, UCL = &#x02212;0.03, <italic>p</italic> = 0.038). No other comparisons reached significance, all <italic>ps</italic> &#x0003E; 0.193.</p>
<p>The two way interactions between EOT and Condition (<italic>p</italic> adj = 0.487) and Group (<italic>p</italic> adj = 1.75) were not significant. However, a significant interaction between Condition and Group was observed, <inline-formula><mml:math id="M32"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 63.70, <italic>p</italic> adj &#x0003C; 0.001. We observed significant main effects of Group <inline-formula><mml:math id="M33"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 28.61, <italic>p</italic> adj &#x0003C; 0.001, and Condition, <inline-formula><mml:math id="M34"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 72.26, <italic>p</italic> adj &#x0003C; 0.001. The effect of EOT was statistically negligible, <inline-formula><mml:math id="M35"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 0.001, <italic>p</italic> = 0.970.</p>
<p>Overall, we observed a difference between the two groups in terms of the total alexithymia levels&#x02014;such that TGNC participants scored significantly higher than cisgender participants. This difference was relevant with respect to how easily they could focus their attention on external stimuli, but critically only in the Silent Reading condition. Specifically, the more TGNC participants oriented their thoughts outward during the Silent Reading, the more incongruent the experience of their inner voice might have been perceived.</p>
</sec>
</sec>
<sec>
<label>4.3.3</label>
<title>Gender identity</title>
<p>Results from the model featuring scores of the MULTI-GIQ suggested there was no relation between gender identity aspects as measured by the questionnaire and vocal congruence scores, all <italic>ps</italic> &#x02265; 0.057.</p>
</sec>
<sec>
<label>4.3.4</label>
<title>Gender minority stress and discrimination</title>
<sec>
<label>4.3.4.1</label>
<title>Discrimination</title>
<p>We found a significant interaction between Condition and Discrimination, <inline-formula><mml:math id="M36"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 19.37, <italic>p</italic> adj &#x0003C; 0.001. Estimated marginal trends showed that higher scores of Discrimination correspond to lower scores on the VCS in the Silent Reading condition (&#x003B2; <italic>Silent</italic> = &#x02212;1.44, <italic>SE</italic> = 0.60, <italic>t</italic> = &#x02212;2.369, LCL = &#x02212;2.69, UCL = &#x02212;0.19, <italic>p</italic> = 0.025). No other significant comparison emerged, all <italic>ps</italic> &#x02265; 0.734.</p>
<p>The model also yielded a significant main effect of Condition, <inline-formula><mml:math id="M37"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 92.91, <italic>p</italic> adj &#x0003C; 0.001, while there was no main effect of Discrimination, <inline-formula><mml:math id="M38"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 0.62, <italic>p</italic> adj = 0.415.</p>
</sec>
<sec>
<label>4.3.4.2</label>
<title>Internalized transphobia</title>
<p>We found a significant two-way interaction between Condition and Internalized Transphobia, <inline-formula><mml:math id="M39"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 8.98, <italic>p</italic> adj = 0.018. Estimated marginal trends showed that for TGNC participants lower scores on the VCS in the Listening condition are associated with significantly higher scores of Internalized Transphobia (&#x003B2; <italic>Listening</italic> = &#x02212;0.338, <italic>SE</italic> = 0.15, <italic>t</italic> = &#x02212;2.142, LCL = &#x02212;0.66, UCL = &#x02212;0.01, <italic>p</italic> = 0.041). No other significant comparisons emerged, all <italic>ps</italic> &#x02265; 0.155.</p>
<p>The model also yielded a significant main effect of Condition, <inline-formula><mml:math id="M40"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 89.16, <italic>p</italic> adj &#x0003C; 0.001. The main effect of Internalized Transphobia was not statistically significant, <inline-formula><mml:math id="M41"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 1.83, <italic>p</italic> adj = 0.244.</p>
</sec>
<sec>
<label>4.3.4.3</label>
<title>Non-disclosure</title>
<p>We found a significant two-way interaction between Condition and Non-disclosure, <inline-formula><mml:math id="M42"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 29.18, <italic>p</italic> adj &#x0003C; 0.001. Estimated linear trends showed that for TGNC participants lower scores on the VCS scale in the Reading Aloud condition are associated with significantly higher scores of Non-disclosure (&#x003B2; <italic>Reading Aloud</italic> = &#x02212;0.47, <italic>SE</italic> = 0.22, <italic>t</italic> = &#x02212;2.088, LCL = &#x02212;0.93, UCL = &#x02212;0.008, <italic>p</italic> = 0.046). Also, we found that for the TGNC Group lower scores on the VCS scale in the Listening condition are associated with significantly higher scores of Non-disclosure (&#x003B2; <italic>Listening</italic> = &#x02212;0.53, <italic>SE</italic> = 0.22, <italic>t</italic> = &#x02212;2.346, LCL = &#x02212;0.99, UCL = &#x02212;0.06, <italic>p</italic> = 0.026). No other significant comparison emerged, all <italic>ps</italic> &#x02265; 0.401.</p>
<p>The model also yielded a significant main effect of Condition, <inline-formula><mml:math id="M43"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 96.45, <italic>p</italic> adj &#x0003C; 0.001. The effect of Non-disclosure was not statistically significant, <inline-formula><mml:math id="M44"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 1.66, <italic>p</italic> adj = 0.253.</p>
</sec>
<sec>
<label>4.3.4.4</label>
<title>Pride</title>
<p>We found a significant two-way interaction between Condition and Pride, <inline-formula><mml:math id="M45"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 10.75, <italic>p</italic> adj = 0.008. However, no significant relation emerged from <italic>post-hoc</italic> comparisons, all <italic>ps</italic> &#x02265; 0.217 (&#x003B2; <italic>Silent Reading</italic> = &#x02212;0.23, <italic>SE</italic> = 0.18, <italic>t</italic> = &#x02212;1.263, LCL = &#x02212;0.60, UCL = 0.14, <italic>p</italic> = 0.217; &#x003B2; <italic>Reading Aloud</italic> = 0.06, <italic>SE</italic> = 0.18, <italic>t</italic> = 0.332, LCL = &#x02212;0.31, UCL = 0.43, <italic>p</italic> = 0.742; &#x003B2; <italic>Listening</italic> = 0.14, <italic>SE</italic> = 0.18, <italic>t</italic> = 0.781, LCL = &#x02212;0.23, UCL = 0.51, <italic>p</italic> = 0.441).</p>
<p>The model also yielded a significant main effect of Condition, <inline-formula><mml:math id="M46"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 89.80, <italic>p</italic> adj &#x0003C; 0.001. The effect of Pride was not statistically significant, <inline-formula><mml:math id="M47"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 0.003, <italic>p</italic> adj = 0.957.</p>
</sec>
<sec>
<label>4.3.4.5</label>
<title>Community connectedness</title>
<p>We found a significant two-way interaction between Condition and Community Connectedness, <inline-formula><mml:math id="M48"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 7.32, <italic>p</italic> adj = 0.038. However, no significant relation emerged from <italic>post-hoc</italic> comparisons, all <italic>ps</italic> &#x02265; 0.140 (&#x003B2; <italic>Silent Reading</italic> = &#x02212;0.48, <italic>SE</italic> = 0.31, <italic>t</italic> = &#x02212;1.519, <italic>p</italic> = 0.140, LCL = &#x02212;1.13, UCL = 1.69; &#x003B2; <italic>Reading Aloud</italic> = 0.01, <italic>SE</italic> = 0.31, <italic>t</italic> = 0.062, LCL = &#x02212;0.63, UCL = 0.67, <italic>p</italic> = 0.950; &#x003B2; <italic>Listening</italic> = 0.002, <italic>SE</italic> = 0.31, <italic>t</italic> = 0.008, LCL = &#x02212;0.64, UCL = 0.65, <italic>p</italic> = 0.996).</p>
<p>The model also yielded a significant main effect of Condition, <inline-formula><mml:math id="M49"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 88.56, <italic>p</italic> adj &#x0003C; 0.001. The effect of Community Connectedness was not statistically significant, <inline-formula><mml:math id="M50"><mml:msubsup><mml:mrow><mml:mi>&#x003C7;</mml:mi></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup></mml:math></inline-formula> = 0.27, <italic>p</italic> adj = 0.676.</p>
<p>Altogether, the results from the questionnaire addressing factors related to stress and discrimination driven by gender identity provide a nuanced picture of their relation with vocal congruence. Indeed, we found that TGNC participants who had experienced more social discrimination on the basis of their gender identity also reported lower vocal congruence in the Silent Reading condition&#x02014;suggesting these negative experiences might have been systematically internalized. By contrast, TGNC participants who internalized social prejudice about TGNC identity and were less confident in self-disclosure do not recognize their outer voice (Reading Aloud and Listening), reporting lower congruence scores.</p>
</sec>
</sec>
</sec>
</sec>
<sec sec-type="discussion" id="s5">
<label>5</label>
<title>Discussion</title>
<p>In this study, we explored vocal congruence across different gender identities, comparing cisgender with transgender and gender nonconforming participants (TGNC) on a self-voice perception task in three conditions: (i) Silent Reading, (ii) Reading Aloud, and (iii) Listening to their recorded voice. We then investigated whether the observed differences could be accounted for by individual differences in interoceptive sensibility&#x02014;the awareness of internal bodily sensations&#x02014;and alexithymia. Additionally, we explored whether these effects were shaped by the flexibility of participants&#x00027; gender identity representations and influenced by experiences of gender-related discrimination.</p>
<p>We predicted that TGNC participants would report lower scores of vocal congruence in conditions in which they were asked to pay attention to their outer voice compared to cisgender participants&#x02014;while reporting higher congruence following silent reading. Also, we expected that reading a gender-stereotyped text would decrease vocal congruence in TGNC participants. Results partially confirmed our first prediction, with TGNC participants experiencing lower vocal congruence in both Reading Aloud and Listening conditions compared to cisgender participants. Unexpectedly, this difference persisted in the Silent Reading condition. Results from the Inner-Outer Voice Congruence Rating consolidated these findings, showing that TGNC participants perceived a greater mismatch between their internal representation of their inner and outer voice than cisgender participants.</p>
<p>Taken together, the results from the main tasks specifically addressing vocal congruence underscore the dynamic and reciprocal relationship between core components of one&#x00027;s self-concept, such as gender identity, and more embodied identity characteristics, such as the voice. Importantly, our data suggest that for individuals whose gender identity does not fit within binary, heteronormative categories, this reciprocal loop may also be a source of difficulty. Notably, although one might have expected TGNC participants to experience greater vocal incongruence only when directly confronted with their own voice&#x02014;whether recorded or not&#x02014;such mismatch also emerged during internal reading. This pattern may point to subtle forms of internalized stigma, whereby even private aspects of experience, such as one&#x00027;s inner voice, are perceived as misaligned with the voice participants would accept and desire.</p>
<p>Our second prediction concerned the impact of gender stereotypes on the perception of vocal congruence in relation to gender identity. No significant differences emerged between the two groups as to the type of text (gendered vs. neutral), with TGNC participants consistently exhibiting higher vocal incongruence, regardless of the gendered content of the texts. It is possible that the absence of an effect is related to the task instructions. Participants were neither required to memorize the content of the text nor to convey it to another interlocutor. Thus, we might hypothesize that the semantics of the text could be overridden by self-related cognitive, emotional, and interoceptive variables associated with the reading process itself, whether it is internal or external. We further addressed the role of interoceptive sensibility in voice perception within different gender identities. Previous studies have demonstrated that interoception&#x02014;particularly through bone conduction and vibrotactile sensations&#x02014;can influence self-voice perception (<xref ref-type="bibr" rid="B23">Crow et al., 2021</xref>; <xref ref-type="bibr" rid="B78">Orepic et al., 2022</xref>; <xref ref-type="bibr" rid="B102">Smeltzer et al., 2023</xref>). Overall, TGNC participants reported lower ability to self-regulate internal states and lower trust in bodily signals compared to cisgender participants. In relation to vocal congruence, TGNC participants who showed greater Emotional Awareness also tended to report a stronger sense of congruence in the Silent Reading condition. No similar relationship was observed for cisgender participants. We speculate that Emotional Awareness may enhance vocal congruence only in the Silent reading condition, but not in the Reading Aloud and Listening conditions&#x02014;where heightened distress may override the protective effects of Emotional Awareness. 
Furthermore, visual inspection of the data suggests that as Emotional Awareness increases, vocal congruence improves more markedly in the Silent Reading condition than in the Reading Aloud and Listening conditions, indicating a heightened sensitivity to social exposure. This pattern may highlight a potential distinction between private self-perception and socially mediated voice experiences.</p>
<p>Interestingly, for the TGNC group (but not for cisgender participants) the tendency not to experience emotional distress with sensations of physical discomfort was associated with higher VCS scores during both the Reading Aloud and Listening conditions. This suggests that being able to self-regulate one&#x00027;s own emotional distress when faced with a potentially misaligned auditory stimulus (i.e., participants&#x00027; voice perceived as incongruent) might have acted as a protective factor during the task.</p>
<p>Additionally, higher trust in bodily signals was associated with higher vocal congruence among TGNC individuals in both the Listening and Reading Aloud conditions. We can speculate that experiencing one&#x00027;s own body sensations as safe and trustworthy again acts as a protection against both psychological and physical discomfort arising from the perception of vocal incongruence. Visual inspection of the data further suggests that, at higher levels of trust in bodily signals, vocal congruence tends to align across inner (Silent Reading) and outer (Reading Aloud and Listening) voice conditions.</p>
<p>In our study we found that higher overall alexithymia was associated with lower vocal congruence within the TGNC participants, regardless of the experimental condition. This result highlights a general role of emotional awareness in shaping the perception of vocal congruence within this population. Alexithymia is currently understood as a multifaceted and dimensional construct (<xref ref-type="bibr" rid="B5">Bagby et al., 2020</xref>). According to some proposals, the ability to identify and express emotions may be influenced, among other factors, by gender socialization, with men and women being guided to rely on different cues when gauging their internal states (<xref ref-type="bibr" rid="B9">Berke et al., 2018</xref>; <xref ref-type="bibr" rid="B81">Pennebaker and Roberts, 1992</xref>). Male participants usually score higher than female participants in interoceptive accuracy tasks, as revealed by heartbeat counting tasks (for a review, see: <xref ref-type="bibr" rid="B88">Prentice and Murphy, 2022</xref>; see also <xref ref-type="bibr" rid="B28">Desmedt et al., 2020</xref>). On the other hand, women outperform men in emotion recognition and emotional awareness (<xref ref-type="bibr" rid="B106">Thompson and Voyer, 2014</xref>), while men consistently report higher levels of alexithymia compared to women (<xref ref-type="bibr" rid="B55">Levant et al., 2009</xref>). However, no conclusive evidence on the presence of differences in interoceptive and emotional abilities between genders exists, and different models of explanations have been proposed.</p>
<p>Interestingly, for TGNC participants, higher externally-oriented thinking (tendency to focus on concrete, practical, and objective aspects of the external environment, rather than on internal contents; <xref ref-type="bibr" rid="B59">Luminet et al., 2004</xref>; <xref ref-type="bibr" rid="B60">Lumley and Bazydlo, 2000</xref>) was also related to reduced vocal congruence, but only during the Silent Reading condition. This suggests that TGNC participants with high Externally-Oriented Thinking specifically struggle more to focus on the internal voice during the Silent Reading condition as compared to the Reading Aloud and Listening conditions. Visual inspection of the data further suggests that, at higher levels of Externally-Oriented Thinking, the outward focus on external stimuli may distract from internal signals, leading to a more &#x0201C;fragmented&#x0201D; experience of inner voice, while articulatory and auditory feedback would instead contribute to the grounding of bodily self. In light of these results, we believe future research is needed to further investigate the prevalence and impact of alexithymia within the TGNC population.</p>
<p>Overall, there was no effect of participants&#x00027; ontological beliefs about gender/sex on their perception of vocal congruence. However, it seemed vocal congruence judgments of TGNC participants were partly modulated by gender-related discrimination and minority stress levels. We found both proximal (i.e., internalized transphobia and non-disclosure) and distal (i.e., discrimination) stressors to impact vocal congruence perception among different experimental conditions. Distal stressors, which are more related to structural stigma (i.e., social conditions, cultural norms and institutional policies that restrict opportunities and resources, as well as wellbeing for stigmatized groups, see <xref ref-type="bibr" rid="B40">Hatzenbuehler et al., 2024</xref>) negatively affected the inner perception of vocal congruence, with higher reported experience of discrimination associated with lower scores in the Silent Reading condition. Remarkably, this finding might help elucidate why, contrary to our initial expectation, we also found cisgender and TGNC participants to differ in their vocal congruence judgments on the main task also in the inner reading condition. Conversely, proximal stressors like internalized transphobia and more crucially the tendency not to disclose one&#x00027;s own gender identity&#x02014;for example by modifying the way of speaking&#x02014;were associated with lower VCS scores in the Listening condition and in both the Reading Aloud and Listening conditions, respectively.</p>
<p>To our knowledge, this is the first study to directly compare vocal congruence between two balanced groups of cisgender and TGNC participants. Our findings are in line with previous literature on voice perception in TGNC individuals, adding layers to this literature by tackling the unexplored topic of vocal congruence in this population. However, this study has some methodological limitations that are worth mentioning. First, we did not control for levels of vocal discomfort prior to the experimental procedure. Second, we are aware that the TGNC group might collapse very different experiences regarding GAHT (Gender-Affirming Hormone Therapy) status or social transition. However, following the request of the center&#x00027;s operators we deliberately refrained from asking for more detailed personal data, besides the eventual stage of gender-affirming procedures and voice training interventions. This was motivated by the necessity of limiting participants&#x00027; potential discomfort experiences, but we acknowledge future research might delve deeper into further factors that may influence this population&#x00027;s vocal congruence. In addition, the specific selection of linguistic stimuli may have limited the influence of gender primes on VCS scores. We also used the original version of the MAIA scale (<xref ref-type="bibr" rid="B66">Mehling et al., 2012</xref>), which, while widely used, has since been revised to improve psychometric properties and address conceptual overlaps across subscales. Lastly, the difficulty in recruiting TGNC participants led to a relatively small sample size, which limited the generalizability of the findings and prevented comparisons across different gender identities within each group, including as a function of the gendered content of the texts. Future research should aim to address these limitations to better capture the complex relationship between vocal congruence, language, and gender identity.</p>
</sec>
<sec sec-type="conclusion" id="s6">
<label>6</label>
<title>Conclusion</title>
<p>This study investigates vocal congruence in two groups that differ in their gender identity. We found that transgender and gender non-conforming participants experienced lower vocal congruence compared to cisgender participants in all experimental conditions of the vocal congruence task, with a larger difference observed when an external auditory feedback was present. This experience of incongruence appears to be modulated by interoceptive sensibility and levels of alexithymia in transgender and gender non-conforming participants but not in cisgender ones. In addition, minority-stress related factors were also associated with higher perceived vocal incongruence in the TGNC group. Further research is needed to deepen our understanding of the relationship between inner experiences and voice perception and to clarify the reciprocal relationship between self-identity and self-voice perception.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s7">
<title>Data availability statement</title>
<p>The datasets presented in this study can be found in online repositories. The names of the repository/repositories and accession number(s) can be found at: <ext-link ext-link-type="uri" xlink:href="https://osf.io/v2gtn/">https://osf.io/v2gtn/</ext-link>.</p>
</sec>
<sec sec-type="ethics-statement" id="s8">
<title>Ethics statement</title>
<p>The studies involving humans were approved by the Ethics Committee of the Department of Dynamic, Clinical and Health Psychology, Sapienza University of Rome. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec sec-type="author-contributions" id="s9">
<title>Author contributions</title>
<p>CD: Writing &#x02013; review &#x00026; editing, Conceptualization, Writing &#x02013; original draft, Resources, Formal analysis, Project administration, Data curation, Methodology. CM: Conceptualization, Supervision, Project administration, Data curation, Writing &#x02013; review &#x00026; editing, Methodology, Investigation, Writing &#x02013; original draft, Visualization, Resources, Formal analysis. CF: Visualization, Data curation, Resources, Investigation, Project administration, Conceptualization, Writing &#x02013; original draft, Supervision, Methodology, Formal analysis, Writing &#x02013; review &#x00026; editing, Software. AB: Methodology, Conceptualization, Supervision, Writing &#x02013; review &#x00026; editing, Funding acquisition, Writing &#x02013; original draft, Investigation, Resources.</p>
</sec>
<ack><title>Acknowledgments</title><p>We thank the Gay Center LGBT&#x0002B; Center of Rome and the SAIFIP service (<italic>San Camillo-Forlanini</italic> Hospital Complex, Rome) for their help in the recruitment of participants. We are also grateful to Guido Giovanardi, Marta Mirabella and all the BalLab (Body, Action, and Language Lab) members for insightful comments on this project.</p></ack>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s11">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s12">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="s13">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fcogn.2026.1638501/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fcogn.2026.1638501/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.docx" id="SM1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Alderson-Day</surname> <given-names>B.</given-names></name> <name><surname>Fernyhough</surname> <given-names>C.</given-names></name></person-group> (<year>2015</year>). <article-title>Inner speech: development, cognitive functions, phenomenology, and neurobiology</article-title>. <source>Psychol. Bull.</source> <volume>141</volume>:<fpage>931</fpage>. doi: <pub-id pub-id-type="doi">10.1037/bul0000021</pub-id><pub-id pub-id-type="pmid">26011789</pub-id></mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal"><collab>American Psychological Association</collab> (<year>2015</year>). <article-title>Guidelines for psychological practice with transgender and gender nonconforming people</article-title>. <source>Am. Psychol.</source> <volume>70</volume>, <fpage>832</fpage>&#x02013;<lpage>864</lpage>. doi: <pub-id pub-id-type="doi">10.1037/a0039906</pub-id></mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Arnold</surname> <given-names>A. J.</given-names></name> <name><surname>Winkielman</surname> <given-names>P.</given-names></name> <name><surname>Dobkins</surname> <given-names>K.</given-names></name></person-group> (<year>2019</year>). <article-title>Interoception and social connection</article-title>. <source>Front. Psychol.</source> <volume>10</volume>:<fpage>480176</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2019.02589</pub-id><pub-id pub-id-type="pmid">31849741</pub-id></mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bagby</surname> <given-names>R. M.</given-names></name> <name><surname>Parker</surname> <given-names>J. D.</given-names></name> <name><surname>Taylor</surname> <given-names>G. J.</given-names></name></person-group> (<year>1994</year>). <article-title>The twenty-item Toronto alexithymia scale&#x02014;I. Item selection and cross-validation of the factor structure</article-title>. <source>J. Psychosom. Res.</source> <volume>38</volume>, <fpage>23</fpage>&#x02013;<lpage>32</lpage>. doi: <pub-id pub-id-type="doi">10.1016/0022-3999(94)90005-1</pub-id><pub-id pub-id-type="pmid">8126686</pub-id></mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bagby</surname> <given-names>R. M.</given-names></name> <name><surname>Parker</surname> <given-names>J. D.</given-names></name> <name><surname>Taylor</surname> <given-names>G. J.</given-names></name></person-group> (<year>2020</year>). <article-title>Twenty-five years with the 20-item Toronto Alexithymia Scale</article-title>. <source>J. Psychosomatic Res.</source> <volume>131</volume>:<fpage>109940</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jpsychores.2020.109940</pub-id><pub-id pub-id-type="pmid">32007790</pub-id></mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Barrett</surname> <given-names>L. F.</given-names></name> <name><surname>Simmons</surname> <given-names>W. K.</given-names></name></person-group> (<year>2015</year>). <article-title>Interoceptive predictions in the brain</article-title>. <source>Nat. Rev. Neurosci.</source> <volume>16</volume>, <fpage>419</fpage>&#x02013;<lpage>429</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nrn3950</pub-id><pub-id pub-id-type="pmid">26016744</pub-id></mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bates</surname> <given-names>D.</given-names></name> <name><surname>Maechler</surname> <given-names>M.</given-names></name> <name><surname>Bolker</surname> <given-names>B.</given-names></name> <name><surname>Walker</surname> <given-names>S.</given-names></name></person-group> (<year>2015</year>). <article-title>Fitting linear mixed-effects models using lme4</article-title>. <source>J. Stat. Softw.</source> <volume>67</volume>, <fpage>1</fpage>&#x02013;<lpage>48</lpage>. doi: <pub-id pub-id-type="doi">10.18637/jss.v067.i01</pub-id></mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Belin</surname> <given-names>P.</given-names></name> <name><surname>Fecteau</surname> <given-names>S.</given-names></name> <name><surname>Bedard</surname> <given-names>C.</given-names></name></person-group> (<year>2004</year>). <article-title>Thinking the voice: neural correlates of voice perception</article-title>. <source>Trends Cogn. Sci.</source> <volume>8</volume>, <fpage>129</fpage>&#x02013;<lpage>135</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.tics.2004.01.008</pub-id><pub-id pub-id-type="pmid">15301753</pub-id></mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Berke</surname> <given-names>D. S.</given-names></name> <name><surname>Reidy</surname> <given-names>D.</given-names></name> <name><surname>Zeichner</surname> <given-names>A.</given-names></name></person-group> (<year>2018</year>). <article-title>Masculinity, emotion regulation, and psychopathology: a critical review and integrated model</article-title>. <source>Clin. Psychol. Rev.</source> <volume>66</volume>, <fpage>106</fpage>&#x02013;<lpage>116</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cpr.2018.01.004</pub-id><pub-id pub-id-type="pmid">29398184</pub-id></mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bressi</surname> <given-names>C.</given-names></name> <name><surname>Taylor</surname> <given-names>G.</given-names></name> <name><surname>Parker</surname> <given-names>J.</given-names></name> <name><surname>Bressi</surname> <given-names>S.</given-names></name> <name><surname>Brambilla</surname> <given-names>V.</given-names></name> <name><surname>Aguglia</surname> <given-names>E.</given-names></name> <etal/></person-group>. (<year>1996</year>). <article-title>Cross validation of the factor structure of the 20-item Toronto Alexithymia Scale: an Italian multicenter study</article-title>. <source>J. Psychosomatic Res.</source> <volume>41</volume>, <fpage>551</fpage>&#x02013;<lpage>559</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0022-3999(96)00228-0</pub-id><pub-id pub-id-type="pmid">9032718</pub-id></mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Budge</surname> <given-names>S. L.</given-names></name></person-group> (<year>2020</year>). <article-title>Suicide and the transgender experience: a public health crisis</article-title>. <source>Am. Psychol.</source> <volume>75</volume>:<fpage>380</fpage>. doi: <pub-id pub-id-type="doi">10.1037/amp0000619</pub-id><pub-id pub-id-type="pmid">32250142</pub-id></mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cal&#x000EC;</surname> <given-names>G.</given-names></name> <name><surname>Ambrosini</surname> <given-names>E.</given-names></name> <name><surname>Picconi</surname> <given-names>L.</given-names></name> <name><surname>Mehling</surname> <given-names>W. E.</given-names></name> <name><surname>Committeri</surname> <given-names>G.</given-names></name></person-group> (<year>2015</year>). <article-title>Investigating the relationship between interoceptive accuracy, interoceptive awareness, and emotional susceptibility</article-title>. <source>Front. Psychol.</source> <volume>6</volume>:<fpage>1202</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2015.01202</pub-id><pub-id pub-id-type="pmid">26379571</pub-id></mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Candini</surname> <given-names>M.</given-names></name> <name><surname>Avanzi</surname> <given-names>S.</given-names></name> <name><surname>Cantagallo</surname> <given-names>A.</given-names></name> <name><surname>Zangoli</surname> <given-names>M. G.</given-names></name> <name><surname>Benassi</surname> <given-names>M.</given-names></name> <name><surname>Querzani</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>The lost ability to distinguish between self and other voice following a brain lesion</article-title>. <source>NeuroImage Clin.</source> <volume>18</volume>, <fpage>903</fpage>&#x02013;<lpage>911</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.nicl.2018.03.021</pub-id><pub-id pub-id-type="pmid">29876275</pub-id></mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Carmel</surname> <given-names>T. C.</given-names></name> <name><surname>Erickson-Schroth</surname> <given-names>L.</given-names></name></person-group> (<year>2016</year>). <article-title>Mental health and the transgender population</article-title>. <source>J. Psychosoc. Nurs. Mental Health Serv.</source> <volume>54</volume>, <fpage>44</fpage>&#x02013;<lpage>48</lpage>. doi: <pub-id pub-id-type="doi">10.3928/02793695-20161208-09</pub-id><pub-id pub-id-type="pmid">28001287</pub-id></mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cattarin</surname> <given-names>J. A.</given-names></name> <name><surname>Thompson</surname> <given-names>J. K.</given-names></name> <name><surname>Thomas</surname> <given-names>C.</given-names></name> <name><surname>Williams</surname> <given-names>R.</given-names></name></person-group> (<year>2000</year>). <article-title>Body image, mood, and televised images of attractiveness: the role of social comparison</article-title>. <source>J. Soc. Clin. Psychol.</source> <volume>19</volume>, <fpage>220</fpage>&#x02013;<lpage>239</lpage>. doi: <pub-id pub-id-type="doi">10.1521/jscp.2000.19.2.220</pub-id></mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chadwick</surname> <given-names>K. A.</given-names></name> <name><surname>Coleman</surname> <given-names>R.</given-names></name> <name><surname>Andreadis</surname> <given-names>K.</given-names></name> <name><surname>Pitti</surname> <given-names>M.</given-names></name> <name><surname>Rameau</surname> <given-names>A.</given-names></name></person-group> (<year>2022</year>). <article-title>Outcomes of gender-affirming voice and communication modification for transgender individuals</article-title>. <source>Laryngoscope</source> <volume>132</volume>, <fpage>1615</fpage>&#x02013;<lpage>1621</lpage>. doi: <pub-id pub-id-type="doi">10.1002/lary.29946</pub-id><pub-id pub-id-type="pmid">34787313</pub-id></mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chang</surname> <given-names>J.</given-names></name> <name><surname>Yung</surname> <given-names>K.</given-names></name></person-group> (<year>2021</year>). <article-title>Gender affirming voice care: a literature review</article-title>. <source>Int. J. Head Neck Surg.</source> <volume>12</volume>, <fpage>93</fpage>&#x02013;<lpage>97</lpage>. doi: <pub-id pub-id-type="doi">10.5005/jp-journals-10001-1448</pub-id></mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Charlesworth</surname> <given-names>T. E.</given-names></name> <name><surname>Yang</surname> <given-names>V.</given-names></name> <name><surname>Mann</surname> <given-names>T. C.</given-names></name> <name><surname>Kurdi</surname> <given-names>B.</given-names></name> <name><surname>Banaji</surname> <given-names>M. R.</given-names></name></person-group> (<year>2021</year>). <article-title>Gender stereotypes in natural language: word embeddings show robust consistency across child and adult language corpora of more than 65 million words</article-title>. <source>Psychol. Sci.</source> <volume>32</volume>, <fpage>218</fpage>&#x02013;<lpage>240</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0956797620963619</pub-id><pub-id pub-id-type="pmid">33400629</pub-id></mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Christensen</surname> <given-names>R. H. B.</given-names></name></person-group> (<year>2022</year>). <source>ordinal - Regression Models for Ordinal Data</source>. R Package Version 2022, 11&#x02212;16.</mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Conde</surname> <given-names>T.</given-names></name> <name><surname>Gon&#x000E7;alves</surname> <given-names>&#x000D3;. F.</given-names></name> <name><surname>Pinheiro</surname> <given-names>A. P.</given-names></name></person-group> (<year>2018</year>). <article-title>Stimulus complexity matters when you hear your own voice: attention effects on self-generated voice processing</article-title>. <source>Int. J. Psychophysiol.</source> <volume>133</volume>, <fpage>66</fpage>&#x02013;<lpage>78</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ijpsycho.2018.08.007</pub-id><pub-id pub-id-type="pmid">30114437</pub-id></mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Connolly</surname> <given-names>M. D.</given-names></name> <name><surname>Zervos</surname> <given-names>M. J.</given-names></name> <name><surname>Barone II</surname> <given-names>C. J.</given-names></name> <name><surname>Johnson</surname> <given-names>C. C.</given-names></name> <name><surname>Joseph</surname> <given-names>C. L.</given-names></name></person-group> (<year>2016</year>). <article-title>The mental health of transgender youth: advances in understanding</article-title>. <source>J. Adolescent Health</source> <volume>59</volume>, <fpage>489</fpage>&#x02013;<lpage>495</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jadohealth.2016.06.012</pub-id><pub-id pub-id-type="pmid">27544457</pub-id></mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Critchley</surname> <given-names>H. D.</given-names></name> <name><surname>Garfinkel</surname> <given-names>S. N.</given-names></name></person-group> (<year>2017</year>). <article-title>Interoception and emotion</article-title>. <source>Curr. Opin. Psychol.</source> <volume>17</volume>, <fpage>7</fpage>&#x02013;<lpage>14</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.copsyc.2017.04.020</pub-id><pub-id pub-id-type="pmid">28950976</pub-id></mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Crow</surname> <given-names>K. M.</given-names></name> <name><surname>van Mersbergen</surname> <given-names>M.</given-names></name> <name><surname>Payne</surname> <given-names>A. E.</given-names></name></person-group> (<year>2021</year>). <article-title>Vocal congruence: the voice and the self measured by interoceptive awareness</article-title>. <source>J. Voice</source> <volume>35</volume>, <fpage>324</fpage>&#x02013;<lpage>e15</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jvoice.2019.08.027</pub-id><pub-id pub-id-type="pmid">31558332</pub-id></mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dacakis</surname> <given-names>G.</given-names></name> <name><surname>Davies</surname> <given-names>S.</given-names></name> <name><surname>Oates</surname> <given-names>J. M.</given-names></name> <name><surname>Douglas</surname> <given-names>J. M.</given-names></name> <name><surname>Johnston</surname> <given-names>J. R.</given-names></name></person-group> (<year>2013</year>). <article-title>Development and preliminary evaluation of the transsexual voice questionnaire for male-to-female transsexuals</article-title>. <source>J. Voice</source> <volume>27</volume>, <fpage>312</fpage>&#x02013;<lpage>320</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jvoice.2012.11.005</pub-id><pub-id pub-id-type="pmid">23415146</pub-id></mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Davies</surname> <given-names>S.</given-names></name> <name><surname>Papp</surname> <given-names>V. G.</given-names></name> <name><surname>Antoni</surname> <given-names>C.</given-names></name></person-group> (<year>2015</year>). <article-title>Voice and communication change for gender nonconforming individuals: giving voice to the person inside</article-title>. <source>Int. J. Transgender.</source> <volume>16</volume>, <fpage>117</fpage>&#x02013;<lpage>159</lpage>. doi: <pub-id pub-id-type="doi">10.1080/15532739.2015.1075931</pub-id></mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>De Bruin</surname> <given-names>M. D.</given-names></name> <name><surname>Coerts</surname> <given-names>M. J.</given-names></name> <name><surname>Greven</surname> <given-names>A. J.</given-names></name></person-group> (<year>2000</year>). <article-title>Speech therapy in the management of male-to-female transsexuals</article-title>. <source>Folia phoniatrica logopaedica</source> <volume>52</volume>, <fpage>220</fpage>&#x02013;<lpage>227</lpage>. doi: <pub-id pub-id-type="doi">10.1159/000021537</pub-id><pub-id pub-id-type="pmid">10965175</pub-id></mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>De Vignemont</surname> <given-names>F.</given-names></name></person-group> (<year>2023</year>). <source>Affective Bodily Awareness</source>. Cambridge: Cambridge University Press. doi: <pub-id pub-id-type="doi">10.1017/9781009209717</pub-id></mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Desmedt</surname> <given-names>O.</given-names></name> <name><surname>Van Den Houte</surname> <given-names>M.</given-names></name> <name><surname>Walentynowicz</surname> <given-names>M.</given-names></name> <name><surname>Dekeyser</surname> <given-names>S.</given-names></name> <name><surname>Luminet</surname> <given-names>O.</given-names></name> <name><surname>Corneille</surname> <given-names>O.</given-names></name></person-group> (<year>2020</year>). <article-title>A systematic review and meta-analysis on the association between heartbeat counting task performance and mental disorders and their risk factors among adults</article-title>. <source>Preprint.</source> doi: <pub-id pub-id-type="doi">10.31219/osf.io/h3by9</pub-id></mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>dos Santos Oliveira</surname> <given-names>J. C.</given-names></name> <name><surname>da Trindade Duarte</surname> <given-names>J. M.</given-names></name> <name><surname>Sim&#x000F5;es-Zenari</surname> <given-names>M.</given-names></name> <name><surname>Nemr</surname> <given-names>K.</given-names></name></person-group> (<year>2024</year>). <article-title>Risk of dysphonia, presence of vocal changes, and vocal self-perception in Brazilian transgender women</article-title>. <source>J. Voice</source>. doi: <pub-id pub-id-type="doi">10.1016/j.jvoice.2023.12.017</pub-id><pub-id pub-id-type="pmid">38302406</pub-id></mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Drabish</surname> <given-names>K.</given-names></name> <name><surname>Theeke</surname> <given-names>L. A.</given-names></name></person-group> (<year>2022</year>). <article-title>Health impact of stigma, discrimination, prejudice, and bias experienced by transgender people: a systematic review of quantitative studies</article-title>. <source>Iss. Mental Health Nurs.</source> <volume>43</volume>, <fpage>111</fpage>&#x02013;<lpage>118</lpage>. doi: <pub-id pub-id-type="doi">10.1080/01612840.2021.1961330</pub-id><pub-id pub-id-type="pmid">34469283</pub-id></mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Faul</surname> <given-names>F.</given-names></name> <name><surname>Erdfelder</surname> <given-names>E.</given-names></name> <name><surname>Lang</surname> <given-names>A.-G.</given-names></name> <name><surname>Buchner</surname> <given-names>A.</given-names></name></person-group> (<year>2007</year>). <article-title>G<sup>&#x0002A;</sup>Power 3: a flexible statistical power analysis program for the social, behavioral, and biomedical sciences</article-title>. <source>Behav. Res. Methods</source> <volume>39</volume>, <fpage>175</fpage>&#x02013;<lpage>191</lpage>. doi: <pub-id pub-id-type="doi">10.3758/BF03193146</pub-id><pub-id pub-id-type="pmid">17695343</pub-id></mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Fernyhough</surname> <given-names>C.</given-names></name> <name><surname>Russell</surname> <given-names>J.</given-names></name></person-group> (<year>1997</year>). <article-title>Distinguishing one&#x00027;s own voice from those of others: a function for private speech?</article-title> <source>Int. J. Behav. Dev.</source> <volume>20</volume>, <fpage>651</fpage>&#x02013;<lpage>665</lpage>. doi: <pub-id pub-id-type="doi">10.1080/016502597385108</pub-id></mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Graux</surname> <given-names>J.</given-names></name> <name><surname>Gomot</surname> <given-names>M.</given-names></name> <name><surname>Roux</surname> <given-names>S.</given-names></name> <name><surname>Bonnet-Brilhault</surname> <given-names>F.</given-names></name> <name><surname>Bruneau</surname> <given-names>N.</given-names></name></person-group> (<year>2015</year>). <article-title>Is my voice just a familiar voice? An electrophysiological study</article-title>. <source>Soc. Cogn. Affect. Neurosci.</source> <volume>10</volume>, <fpage>101</fpage>&#x02013;<lpage>105</lpage>. doi: <pub-id pub-id-type="doi">10.1093/scan/nsu031</pub-id><pub-id pub-id-type="pmid">24625786</pub-id></mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gross</surname> <given-names>J. J.</given-names></name> <name><surname>John</surname> <given-names>O. P.</given-names></name></person-group> (<year>2003</year>). <article-title>Individual differences in two emotion regulation processes: implications for affect, relationships, and well-being</article-title>. <source>J. Pers. Soc. Psychol.</source> <volume>85</volume>:<fpage>348</fpage>. doi: <pub-id pub-id-type="doi">10.1037/0022-3514.85.2.348</pub-id><pub-id pub-id-type="pmid">12916575</pub-id></mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Haggard</surname> <given-names>P.</given-names></name> <name><surname>de Boer</surname> <given-names>L.</given-names></name></person-group> (<year>2014</year>). <article-title>Oral somatosensory awareness</article-title>. <source>Neurosci. Biobehav. Rev.</source> <volume>47</volume>, <fpage>469</fpage>&#x02013;<lpage>484</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neubiorev.2014.09.015</pub-id><pub-id pub-id-type="pmid">25284337</pub-id></mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hancock</surname> <given-names>A. B.</given-names></name> <name><surname>Krissinger</surname> <given-names>J.</given-names></name> <name><surname>Owen</surname> <given-names>K.</given-names></name></person-group> (<year>2011</year>). <article-title>Voice perceptions and quality of life of transgender people</article-title>. <source>J. Voice</source> <volume>25</volume>, <fpage>553</fpage>&#x02013;<lpage>558</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jvoice.2010.07.013</pub-id><pub-id pub-id-type="pmid">21051199</pub-id></mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hancock</surname> <given-names>A. B.</given-names></name> <name><surname>Pool</surname> <given-names>S. F.</given-names></name></person-group> (<year>2017</year>). <article-title>Influence of listener characteristics on perceptions of sex and gender</article-title>. <source>J. Lang. Soc. Psychol.</source> <volume>36</volume>, <fpage>599</fpage>&#x02013;<lpage>610</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0261927X17704460</pub-id></mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hartig</surname> <given-names>F.</given-names></name></person-group> (<year>2024</year>). <source>DHARMa: Residual Diagnostics for Hierarchical (Multi-Level/Mixed) Regression Models</source>. R Package Version 0.4.7.</mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hatzenbuehler</surname> <given-names>M. L.</given-names></name></person-group> (<year>2009</year>). <article-title>How does sexual minority stigma &#x0201C;get under the skin&#x0201D;? A psychological mediation framework</article-title>. <source>Psychol. Bull.</source> <volume>135</volume>:<fpage>707</fpage>. doi: <pub-id pub-id-type="doi">10.1037/a0016441</pub-id><pub-id pub-id-type="pmid">19702379</pub-id></mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hatzenbuehler</surname> <given-names>M. L.</given-names></name> <name><surname>Lattanner</surname> <given-names>M. R.</given-names></name> <name><surname>McKetta</surname> <given-names>S.</given-names></name> <name><surname>Pachankis</surname> <given-names>J. E.</given-names></name></person-group> (<year>2024</year>). <article-title>Structural stigma and LGBTQ&#x0002B; health: a narrative review of quantitative studies</article-title>. <source>Lancet Public Health</source> <volume>9</volume>, <fpage>e109</fpage>&#x02013;<lpage>e127</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S2468-2667(23)00312-2</pub-id><pub-id pub-id-type="pmid">38307678</pub-id></mixed-citation>
</ref>
<ref id="B41">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Holzman</surname> <given-names>P. S.</given-names></name> <name><surname>Rousey</surname> <given-names>C.</given-names></name></person-group> (<year>1966</year>). <article-title>The voice as a percept</article-title>. <source>J. Pers. Soc. Psychol.</source> <volume>4</volume>:<fpage>79</fpage>. doi: <pub-id pub-id-type="doi">10.1037/h0023518</pub-id><pub-id pub-id-type="pmid">5965194</pub-id></mixed-citation>
</ref>
<ref id="B42">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hughes</surname> <given-names>S. M.</given-names></name> <name><surname>Harrison</surname> <given-names>M. A.</given-names></name></person-group> (<year>2013</year>). <article-title>I like my voice better: self-enhancement bias in perceptions of voice attractiveness</article-title>. <source>Perception</source> <volume>42</volume>, <fpage>941</fpage>&#x02013;<lpage>949</lpage>. doi: <pub-id pub-id-type="doi">10.1068/p7526</pub-id><pub-id pub-id-type="pmid">24386714</pub-id></mixed-citation>
</ref>
<ref id="B43">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hundhammer</surname> <given-names>T.</given-names></name> <name><surname>Mussweiler</surname> <given-names>T.</given-names></name></person-group> (<year>2012</year>). <article-title>How sex puts you in gendered shoes: sexuality-priming leads to gender-based self-perception and behavior</article-title>. <source>J. Pers. Soc. Psychol.</source> <volume>103</volume>, <fpage>176</fpage>&#x02013;<lpage>193</lpage>. doi: <pub-id pub-id-type="doi">10.1037/a0028121</pub-id><pub-id pub-id-type="pmid">22545746</pub-id></mixed-citation>
</ref>
<ref id="B44">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ito</surname> <given-names>T.</given-names></name> <name><surname>Tiede</surname> <given-names>M.</given-names></name> <name><surname>Ostry</surname> <given-names>D. J.</given-names></name></person-group> (<year>2009</year>). <article-title>Somatosensory function in speech perception</article-title>. <source>Proc. Natl. Acad. Sci. U.S.A.</source> <volume>106</volume>, <fpage>1245</fpage>&#x02013;<lpage>1248</lpage>. doi: <pub-id pub-id-type="doi">10.1073/pnas.0810063106</pub-id><pub-id pub-id-type="pmid">19164569</pub-id></mixed-citation>
</ref>
<ref id="B45">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>James</surname> <given-names>S.</given-names></name> <name><surname>Herman</surname> <given-names>J.</given-names></name> <name><surname>Rankin</surname> <given-names>S.</given-names></name> <name><surname>Keisling</surname> <given-names>M.</given-names></name> <name><surname>Mottet</surname> <given-names>L.</given-names></name> <name><surname>Anafi</surname> <given-names>M. A.</given-names></name></person-group> (<year>2016</year>). <source>The Report of the 2015 US Transgender Survey</source>. <publisher-loc>Washington, DC</publisher-loc>.</mixed-citation>
</ref>
<ref id="B46">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Joel</surname> <given-names>D.</given-names></name> <name><surname>Tarrasch</surname> <given-names>R.</given-names></name> <name><surname>Berman</surname> <given-names>Z.</given-names></name> <name><surname>Mukamel</surname> <given-names>M.</given-names></name> <name><surname>Ziv</surname> <given-names>E.</given-names></name></person-group> (<year>2014</year>). <article-title>Queering gender: studying gender identity in &#x02018;normative&#x02019; individuals</article-title>. <source>Psychol. Sex.</source> <volume>5</volume>, <fpage>291</fpage>&#x02013;<lpage>321</lpage>. doi: <pub-id pub-id-type="doi">10.1080/19419899.2013.830640</pub-id></mixed-citation>
</ref>
<ref id="B47">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jones</surname> <given-names>J. J.</given-names></name> <name><surname>Amin</surname> <given-names>M. R.</given-names></name> <name><surname>Kim</surname> <given-names>J.</given-names></name> <name><surname>Skiena</surname> <given-names>S.</given-names></name></person-group> (<year>2020</year>). <article-title>Stereotypical gender associations in language have decreased over time</article-title>. <source>Sociol. Sci.</source> <volume>7</volume>, <fpage>1</fpage>&#x02013;<lpage>35</lpage>. doi: <pub-id pub-id-type="doi">10.15195/v7.a1</pub-id></mixed-citation>
</ref>
<ref id="B48">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kallitsounaki</surname> <given-names>A.</given-names></name> <name><surname>Williams</surname> <given-names>D. M.</given-names></name></person-group> (<year>2023</year>). <article-title>Brief report: an exploration of alexithymia in autistic and nonautistic transgender adults</article-title>. <source>Autism Adulthood</source> <volume>5</volume>, <fpage>210</fpage>&#x02013;<lpage>216</lpage>. doi: <pub-id pub-id-type="doi">10.1089/aut.2022.0113</pub-id><pub-id pub-id-type="pmid">37346987</pub-id></mixed-citation>
</ref>
<ref id="B49">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kassambara</surname> <given-names>A.</given-names></name></person-group> (<year>2023</year>). <source>rstatix: Pipe-Friendly Framework for Basic Statistical Tests</source>.</mixed-citation>
</ref>
<ref id="B50">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kennedy</surname> <given-names>E.</given-names></name> <name><surname>Thibeault</surname> <given-names>S. L.</given-names></name></person-group> (<year>2020</year>). <article-title>Voice&#x02013;gender incongruence and voice health information&#x02013;seeking behaviors in the transgender community</article-title>. <source>Am. J. Speech Lang. Pathol.</source> <volume>29</volume>, <fpage>1563</fpage>&#x02013;<lpage>1573</lpage>. doi: <pub-id pub-id-type="doi">10.1044/2020_AJSLP-19-00188</pub-id><pub-id pub-id-type="pmid">32539455</pub-id></mixed-citation>
</ref>
<ref id="B51">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kent</surname> <given-names>R. D.</given-names></name></person-group> (<year>2024</year>). <article-title>The feel of speech: multisystem and polymodal somatosensation in speech production</article-title>. <source>J. Speech Lang. Hear. Res.</source> <volume>67</volume>, <fpage>1424</fpage>&#x02013;<lpage>1460</lpage>. doi: <pub-id pub-id-type="doi">10.1044/2024_JSLHR-23-00575</pub-id><pub-id pub-id-type="pmid">38593006</pub-id></mixed-citation>
</ref>
<ref id="B52">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kreukels</surname> <given-names>B. P. C.</given-names></name> <name><surname>Haraldsen</surname> <given-names>I. R.</given-names></name> <name><surname>De Cuypere</surname> <given-names>G.</given-names></name> <name><surname>Richter-Appelt</surname> <given-names>H.</given-names></name> <name><surname>Gijs</surname> <given-names>L.</given-names></name> <name><surname>Cohen-Kettenis</surname> <given-names>P. T.</given-names></name></person-group> (<year>2012</year>). <article-title>A European network for the investigation of gender incongruence: the ENIGI initiative</article-title>. <source>Eur. Psychiatry</source> <volume>27</volume>, <fpage>445</fpage>&#x02013;<lpage>450</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.eurpsy.2010.04.009</pub-id><pub-id pub-id-type="pmid">20620022</pub-id></mixed-citation>
</ref>
<ref id="B53">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kunz</surname> <given-names>E. M.</given-names></name> <name><surname>Krasa</surname> <given-names>B. A.</given-names></name> <name><surname>Kamdar</surname> <given-names>F.</given-names></name> <name><surname>Avansino</surname> <given-names>D. T.</given-names></name> <name><surname>Hahn</surname> <given-names>N.</given-names></name> <name><surname>Yoon</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Inner speech in motor cortex and implications for speech neuroprostheses</article-title>. <source>Cell</source> <volume>188</volume>, <fpage>4658</fpage>&#x02013;<lpage>4673</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cell.2025.06.015</pub-id><pub-id pub-id-type="pmid">40816265</pub-id></mixed-citation>
</ref>
<ref id="B54">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lenth</surname> <given-names>R.</given-names></name></person-group> (<year>2024</year>). <source>emmeans: Estimated Marginal Means, aka Least-Squares Means</source>. R Package Version 1.10.10.</mixed-citation>
</ref>
<ref id="B55">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Levant</surname> <given-names>R. F.</given-names></name> <name><surname>Hall</surname> <given-names>R. J.</given-names></name> <name><surname>Williams</surname> <given-names>C. M.</given-names></name> <name><surname>Hasan</surname> <given-names>N. T.</given-names></name></person-group> (<year>2009</year>). <article-title>Gender differences in alexithymia</article-title>. <source>Psychol. Men Mascul.</source> <volume>10</volume>:<fpage>190</fpage>. doi: <pub-id pub-id-type="doi">10.1037/a0015652</pub-id></mixed-citation>
</ref>
<ref id="B56">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lewis</surname> <given-names>M.</given-names></name> <name><surname>Lupyan</surname> <given-names>G.</given-names></name></person-group> (<year>2020</year>). <article-title>Gender stereotypes are reflected in the distributional structure of 25 languages</article-title>. <source>Nat. Hum. Behav.</source> <volume>4</volume>, <fpage>1021</fpage>&#x02013;<lpage>1028</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41562-020-0918-6</pub-id><pub-id pub-id-type="pmid">32747806</pub-id></mixed-citation>
</ref>
<ref id="B57">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lindqvist</surname> <given-names>A.</given-names></name> <name><surname>Renstr&#x000F6;m</surname> <given-names>E. A.</given-names></name> <name><surname>Gustafsson Send&#x000E9;n</surname> <given-names>M.</given-names></name></person-group> (<year>2019</year>). <article-title>Reducing a male bias in language? Establishing the efficiency of three different gender-fair language strategies</article-title>. <source>Sex Roles</source> <volume>81</volume>, <fpage>109</fpage>&#x02013;<lpage>117</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11199-018-0974-9</pub-id></mixed-citation>
</ref>
<ref id="B58">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>L&#x00153;venbruck</surname> <given-names>H.</given-names></name> <name><surname>Grandchamp</surname> <given-names>R.</given-names></name> <name><surname>Rapin</surname> <given-names>L.</given-names></name> <name><surname>Nalborczyk</surname> <given-names>L.</given-names></name> <name><surname>Dohen</surname> <given-names>M.</given-names></name> <name><surname>Perrier</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>&#x0201C;A cognitive neuroscience view of inner language: To predict and to hear, see, feel,&#x0201D;</article-title> in <source>Inner speech: New voices</source>, eds. P. Langland-Hassan and A. Vicente (A cura di) (Oxford University Press), <fpage>131</fpage>&#x02013;<lpage>167</lpage>. doi: <pub-id pub-id-type="doi">10.1093/oso/9780198796640.003.0006</pub-id></mixed-citation>
</ref>
<ref id="B59">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Luminet</surname> <given-names>O.</given-names></name> <name><surname>Rim&#x000E9;</surname> <given-names>B.</given-names></name> <name><surname>Bagby</surname> <given-names>R. M.</given-names></name> <name><surname>Taylor</surname> <given-names>G.</given-names></name></person-group> (<year>2004</year>). <article-title>A multimodal investigation of emotional responding in alexithymia</article-title>. <source>Cogn. Emot.</source> <volume>18</volume>, <fpage>741</fpage>&#x02013;<lpage>766</lpage>. doi: <pub-id pub-id-type="doi">10.1080/02699930341000275</pub-id></mixed-citation>
</ref>
<ref id="B60">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lumley</surname> <given-names>M. A.</given-names></name> <name><surname>Bazydlo</surname> <given-names>R. A.</given-names></name></person-group> (<year>2000</year>). <article-title>The relationship of alexithymia characteristics to dreaming</article-title>. <source>J. Psychosomatic Res.</source> <volume>48</volume>, <fpage>561</fpage>&#x02013;<lpage>567</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0022-3999(00)00096-9</pub-id><pub-id pub-id-type="pmid">11033375</pub-id></mixed-citation>
</ref>
<ref id="B61">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Maniaci</surname> <given-names>G.</given-names></name> <name><surname>Collura</surname> <given-names>G.</given-names></name> <name><surname>La Cascia</surname> <given-names>C.</given-names></name> <name><surname>Piccoli</surname> <given-names>T.</given-names></name> <name><surname>Bongiorno</surname> <given-names>E.</given-names></name> <name><surname>Barresi</surname> <given-names>I.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Beyond the gender binarism: neural correlates of trans men in a functional connectivity-resting-state fMRI pilot study</article-title>. <source>J. Clin. Med.</source> <volume>13</volume>:<fpage>5856</fpage>. doi: <pub-id pub-id-type="doi">10.3390/jcm13195856</pub-id><pub-id pub-id-type="pmid">39407916</pub-id></mixed-citation>
</ref>
<ref id="B62">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Martel</surname> <given-names>M.</given-names></name> <name><surname>Cardinali</surname> <given-names>L.</given-names></name> <name><surname>Roy</surname> <given-names>A. C.</given-names></name> <name><surname>Farn&#x000E8;</surname> <given-names>A.</given-names></name></person-group> (<year>2016</year>). <article-title>Tool-use: an open window into body representation and its plasticity</article-title>. <source>Cogn. Neuropsychol.</source> <volume>33</volume>, <fpage>82</fpage>&#x02013;<lpage>101</lpage>. doi: <pub-id pub-id-type="doi">10.1080/02643294.2016.1167678</pub-id><pub-id pub-id-type="pmid">27315277</pub-id></mixed-citation>
</ref>
<ref id="B63">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mazzoli</surname> <given-names>F.</given-names></name> <name><surname>Cassioli</surname> <given-names>E.</given-names></name> <name><surname>Ristori</surname> <given-names>J.</given-names></name> <name><surname>Castellini</surname> <given-names>G.</given-names></name> <name><surname>Rossi</surname> <given-names>E.</given-names></name> <name><surname>Romani</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Apparent autistic traits in transgender people: a prospective study of the impact of gender-affirming hormonal treatment</article-title>. <source>J. Sexual Med.</source> <volume>19</volume>, <fpage>S60</fpage>&#x02013;<lpage>S61</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jsxm.2022.08.061</pub-id></mixed-citation>
</ref>
<ref id="B64">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>McGuire</surname> <given-names>J. K.</given-names></name> <name><surname>Doty</surname> <given-names>J. L.</given-names></name> <name><surname>Catalpa</surname> <given-names>J. M.</given-names></name> <name><surname>Ola</surname> <given-names>C.</given-names></name></person-group> (<year>2016</year>). <article-title>Body image in transgender young people: findings from a qualitative, community based study</article-title>. <source>Body Image</source> <volume>18</volume>, <fpage>96</fpage>&#x02013;<lpage>107</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bodyim.2016.06.004</pub-id><pub-id pub-id-type="pmid">27352103</pub-id></mixed-citation>
</ref>
<ref id="B65">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mehling</surname> <given-names>W. E.</given-names></name> <name><surname>Acree</surname> <given-names>M.</given-names></name> <name><surname>Stewart</surname> <given-names>A.</given-names></name> <name><surname>Silas</surname> <given-names>J.</given-names></name> <name><surname>Jones</surname> <given-names>A.</given-names></name></person-group> (<year>2018</year>). <article-title>The multidimensional assessment of interoceptive awareness, version 2 (MAIA-2)</article-title>. <source>PLoS ONE</source> <volume>13</volume>:<elocation-id>e0208034</elocation-id>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0208034</pub-id><pub-id pub-id-type="pmid">30513087</pub-id></mixed-citation>
</ref>
<ref id="B66">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mehling</surname> <given-names>W. E.</given-names></name> <name><surname>Price</surname> <given-names>C.</given-names></name> <name><surname>Daubenmier</surname> <given-names>J. J.</given-names></name> <name><surname>Acree</surname> <given-names>M.</given-names></name> <name><surname>Bartmess</surname> <given-names>E.</given-names></name> <name><surname>Stewart</surname> <given-names>A.</given-names></name></person-group> (<year>2012</year>). <article-title>The multidimensional assessment of interoceptive awareness (MAIA)</article-title>. <source>PLoS ONE</source> <volume>7</volume>:<elocation-id>e48230</elocation-id>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0048230</pub-id></mixed-citation>
</ref>
<ref id="B67">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mehling</surname> <given-names>W. E.</given-names></name> <name><surname>Price</surname> <given-names>C.</given-names></name> <name><surname>Daubenmier</surname> <given-names>J. J.</given-names></name> <name><surname>Acree</surname> <given-names>M.</given-names></name> <name><surname>Bartmess</surname> <given-names>E.</given-names></name> <name><surname>Stewart</surname> <given-names>A.</given-names></name></person-group> (<year>2012</year>). <article-title>The multidimensional assessment of interoceptive awareness (MAIA)</article-title>. <source>PLoS ONE</source> <volume>7</volume>:<elocation-id>e48230</elocation-id>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0048230</pub-id></mixed-citation>
</ref>
<ref id="B68">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mirabella</surname> <given-names>M.</given-names></name> <name><surname>Carone</surname> <given-names>N.</given-names></name> <name><surname>Franco</surname> <given-names>A.</given-names></name> <name><surname>Rugo</surname> <given-names>M. A.</given-names></name> <name><surname>Speranza</surname> <given-names>A. M.</given-names></name> <name><surname>Mazzeschi</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2024a</year>). <article-title>Emotional dysregulation and eating symptoms in gender dysphoria and eating disorders: the mediating role of body uneasiness</article-title>. <source>Curr. Psychol.</source> <fpage>1</fpage>&#x02013;<lpage>15</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s12144-024-05663-9</pub-id></mixed-citation>
</ref>
<ref id="B69">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mirabella</surname> <given-names>M.</given-names></name> <name><surname>Giovanardi</surname> <given-names>G.</given-names></name> <name><surname>Fortunato</surname> <given-names>A.</given-names></name> <name><surname>Senofonte</surname> <given-names>G.</given-names></name> <name><surname>Lombardo</surname> <given-names>F.</given-names></name> <name><surname>Lingiardi</surname> <given-names>V.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>The body I live in. Perceptions and meanings of body dissatisfaction in young transgender adults: a qualitative study</article-title>. <source>J. Clin. Med.</source> <volume>9</volume>:<fpage>3733</fpage>. doi: <pub-id pub-id-type="doi">10.3390/jcm9113733</pub-id><pub-id pub-id-type="pmid">33233761</pub-id></mixed-citation>
</ref>
<ref id="B70">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mirabella</surname> <given-names>M.</given-names></name> <name><surname>Mazzuca</surname> <given-names>C.</given-names></name> <name><surname>De Livio</surname> <given-names>C.</given-names></name> <name><surname>Di Giannantonio</surname> <given-names>B.</given-names></name> <name><surname>Rosati</surname> <given-names>F.</given-names></name> <name><surname>Lorusso</surname> <given-names>M. M.</given-names></name> <etal/></person-group>. (<year>2024b</year>). <article-title>The role of language in nonbinary identity construction: gender words matter</article-title>. <source>Psychol. Sexual Orient. Gender Divers</source>. doi: <pub-id pub-id-type="doi">10.1037/sgd0000729</pub-id></mixed-citation>
</ref>
<ref id="B71">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Monti</surname> <given-names>A.</given-names></name> <name><surname>Porciello</surname> <given-names>G.</given-names></name> <name><surname>Panasiti</surname> <given-names>M. S.</given-names></name> <name><surname>Aglioti</surname> <given-names>S. M.</given-names></name></person-group> (<year>2022</year>). <article-title>The inside of me: interoceptive constraints on the concept of self in neuroscience and clinical psychology</article-title>. <source>Psychol. Res.</source> <volume>86</volume>, <fpage>2468</fpage>&#x02013;<lpage>2477</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00426-021-01477-7</pub-id><pub-id pub-id-type="pmid">34050431</pub-id></mixed-citation>
</ref>
<ref id="B72">
<mixed-citation publication-type="web"><person-group person-group-type="author"><name><surname>Morin</surname> <given-names>A.</given-names></name></person-group> (<year>2005</year>). <article-title>Possible links between self-awareness and inner speech theoretical background, underlying mechanisms, and empirical evidence</article-title>. <source>J. Conscious. Stud.</source> <volume>12</volume>, <fpage>115</fpage>&#x02013;<lpage>134</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="http://www.jstor.org">www.jstor.org</ext-link></mixed-citation>
</ref>
<ref id="B73">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Naraindas</surname> <given-names>A. M.</given-names></name> <name><surname>Moreno</surname> <given-names>M.</given-names></name> <name><surname>Cooney</surname> <given-names>S. M.</given-names></name></person-group> (<year>2023</year>). <article-title>Beyond gender: interoceptive sensibility as a key predictor of body image disturbances</article-title>. <source>Behav. Sci.</source> <volume>14</volume>:<fpage>25</fpage>. doi: <pub-id pub-id-type="doi">10.3390/bs14010025</pub-id><pub-id pub-id-type="pmid">38247677</pub-id></mixed-citation>
</ref>
<ref id="B74">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Naunheim</surname> <given-names>M. R.</given-names></name> <name><surname>DeVore</surname> <given-names>E. K.</given-names></name> <name><surname>Huston</surname> <given-names>M. N.</given-names></name> <name><surname>Song</surname> <given-names>P. C.</given-names></name> <name><surname>Franco Jr.</surname> <given-names>R. A.</given-names></name> <name><surname>Bhattacharyya</surname> <given-names>N.</given-names></name></person-group> (<year>2024</year>). <article-title>Increasing prevalence of voice disorders in the USA: updates in the COVID era</article-title>. <source>Laryngoscope</source>. doi: <pub-id pub-id-type="doi">10.1002/lary.31409</pub-id><pub-id pub-id-type="pmid">38525993</pub-id></mixed-citation>
</ref>
<ref id="B75">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Naunheim</surname> <given-names>M. R.</given-names></name> <name><surname>Puka</surname> <given-names>E.</given-names></name> <name><surname>Huston</surname> <given-names>M. N.</given-names></name></person-group> (<year>2023</year>). <article-title>Do you like your voice? A population-based survey of voice satisfaction and voice enhancement</article-title>. <source>Laryngoscope</source>. doi: <pub-id pub-id-type="doi">10.1002/lary.30822</pub-id><pub-id pub-id-type="pmid">37309825</pub-id></mixed-citation>
</ref>
<ref id="B76">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Oestreicher-Kedem</surname> <given-names>Y.</given-names></name> <name><surname>Jacob</surname> <given-names>T.</given-names></name> <name><surname>Lior</surname> <given-names>Y.</given-names></name> <name><surname>Kurzrock</surname> <given-names>A.</given-names></name> <name><surname>Goldman</surname> <given-names>M.</given-names></name> <name><surname>Wasserzug</surname> <given-names>O.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Voice perception and mental health in transgender women</article-title>. <source>J. Voice</source>. doi: <pub-id pub-id-type="doi">10.1016/j.jvoice.2024.09.003</pub-id><pub-id pub-id-type="pmid">39393954</pub-id></mixed-citation>
</ref>
<ref id="B77">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Orepic</surname> <given-names>P.</given-names></name> <name><surname>Kannape</surname> <given-names>O. A.</given-names></name> <name><surname>Faivre</surname> <given-names>N.</given-names></name> <name><surname>Blanke</surname> <given-names>O.</given-names></name></person-group> (<year>2023</year>). <article-title>Bone conduction facilitates self-other voice discrimination</article-title>. <source>R. Soc. Open Sci.</source> <volume>10</volume>:<fpage>221561</fpage>. doi: <pub-id pub-id-type="doi">10.1098/rsos.221561</pub-id><pub-id pub-id-type="pmid">36816848</pub-id></mixed-citation>
</ref>
<ref id="B78">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Orepic</surname> <given-names>P.</given-names></name> <name><surname>Park</surname> <given-names>H. D.</given-names></name> <name><surname>Rognini</surname> <given-names>G.</given-names></name> <name><surname>Faivre</surname> <given-names>N.</given-names></name> <name><surname>Blanke</surname> <given-names>O.</given-names></name></person-group> (<year>2022</year>). <article-title>Breathing affects self-other voice discrimination in a bodily state associated with somatic passivity</article-title>. <source>Psychophysiology</source> <volume>59</volume>:<fpage>e14016</fpage>. doi: <pub-id pub-id-type="doi">10.1111/psyp.14016</pub-id><pub-id pub-id-type="pmid">35150452</pub-id></mixed-citation>
</ref>
<ref id="B79">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Paoli</surname> <given-names>U. E.</given-names></name></person-group> (<year>1957</year>). <source>Come vivevano i Greci</source>. <publisher-loc>Torino</publisher-loc>: <publisher-name>Edizioni Radio Italiana</publisher-name>.</mixed-citation>
</ref>
<ref id="B80">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Peng</surname> <given-names>Z.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name> <name><surname>Meng</surname> <given-names>L.</given-names></name> <name><surname>Liu</surname> <given-names>H.</given-names></name> <name><surname>Hu</surname> <given-names>Z.</given-names></name></person-group> (<year>2019</year>). <article-title>One&#x00027;s own and similar voices are more attractive than other voices</article-title>. <source>Aust. J. Psychol.</source> <volume>71</volume>, <fpage>212</fpage>&#x02013;<lpage>222</lpage>. doi: <pub-id pub-id-type="doi">10.1111/ajpy.12235</pub-id></mixed-citation>
</ref>
<ref id="B81">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pennebaker</surname> <given-names>J. W.</given-names></name> <name><surname>Roberts</surname> <given-names>T. A.</given-names></name></person-group> (<year>1992</year>). <article-title>Toward a his and hers theory of emotion: gender differences in visceral perception</article-title>. <source>J. Soc. Clin. Psychol.</source> <volume>11</volume>, <fpage>199</fpage>&#x02013;<lpage>212</lpage>. doi: <pub-id pub-id-type="doi">10.1521/jscp.1992.11.3.199</pub-id></mixed-citation>
</ref>
<ref id="B82">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Perrone-Bertolotti</surname> <given-names>M.</given-names></name> <name><surname>Kujala</surname> <given-names>J.</given-names></name> <name><surname>Vidal</surname> <given-names>J. R.</given-names></name> <name><surname>Hamame</surname> <given-names>C. M.</given-names></name> <name><surname>Ossandon</surname> <given-names>T.</given-names></name> <name><surname>Bertrand</surname> <given-names>O.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>How silent is silent reading? Intracerebral evidence for top-down activation of temporal voice areas during reading</article-title>. <source>J. Neurosci.</source> <volume>32</volume>, <fpage>17554</fpage>&#x02013;<lpage>17562</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.2982-12.2012</pub-id><pub-id pub-id-type="pmid">23223279</pub-id></mixed-citation>
</ref>
<ref id="B83">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pesciarelli</surname> <given-names>F.</given-names></name> <name><surname>Scorolli</surname> <given-names>C.</given-names></name> <name><surname>Cacciari</surname> <given-names>C.</given-names></name></person-group> (<year>2019</year>). <article-title>Neural correlates of the implicit processing of grammatical and stereotypical gender violations: a masked and unmasked priming study</article-title>. <source>Biol. Psychol.</source> <volume>146</volume>:<fpage>107714</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.biopsycho.2019.06.002</pub-id><pub-id pub-id-type="pmid">31185245</pub-id></mixed-citation>
</ref>
<ref id="B84">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pezzulo</surname> <given-names>G.</given-names></name></person-group> (<year>2014</year>). <article-title>Why do you fear the bogeyman? An embodied predictive coding model of perceptual inference</article-title>. <source>Cogn. Affect. Behav. Neurosci.</source> <volume>14</volume>, <fpage>902</fpage>&#x02013;<lpage>911</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13415-013-0227-x</pub-id><pub-id pub-id-type="pmid">24307092</pub-id></mixed-citation>
</ref>
<ref id="B85">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pinna</surname> <given-names>F.</given-names></name> <name><surname>Paribello</surname> <given-names>P.</given-names></name> <name><surname>Somaini</surname> <given-names>G.</given-names></name> <name><surname>Corona</surname> <given-names>A.</given-names></name> <name><surname>Ventriglio</surname> <given-names>A.</given-names></name> <name><surname>Corrias</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Mental health in transgender individuals: a systematic review</article-title>. <source>Int. Rev. Psychiatry</source> <volume>34</volume>, <fpage>292</fpage>&#x02013;<lpage>359</lpage>. doi: <pub-id pub-id-type="doi">10.1080/09540261.2022.2093629</pub-id><pub-id pub-id-type="pmid">36151828</pub-id></mixed-citation>
</ref>
<ref id="B86">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>P&#x000F6;rschmann</surname> <given-names>C.</given-names></name></person-group> (<year>2000</year>). <article-title>Influences of bone conduction and air conduction on the sound of one&#x00027;s own voice</article-title>. <source>Acta Acustica united with Acustica</source> <volume>86</volume>, <fpage>1038</fpage>&#x02013;<lpage>1045</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="http://www.researchgate.net">www.researchgate.net</ext-link></mixed-citation>
</ref>
<ref id="B87">
<mixed-citation publication-type="book"><collab>Posit Team</collab> (<year>2025</year>). <source>RStudio: Integrated Development Environment for R</source>. <publisher-loc>Boston, MA</publisher-loc>: <publisher-name>Posit Software, PBC</publisher-name>.</mixed-citation>
</ref>
<ref id="B88">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Prentice</surname> <given-names>F.</given-names></name> <name><surname>Murphy</surname> <given-names>J.</given-names></name></person-group> (<year>2022</year>). <article-title>Sex differences in interoceptive accuracy: a meta-analysis</article-title>. <source>Neurosci. Biobehav. Rev.</source> <volume>132</volume>, <fpage>497</fpage>&#x02013;<lpage>518</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neubiorev.2021.11.030</pub-id><pub-id pub-id-type="pmid">34838927</pub-id></mixed-citation>
</ref>
<ref id="B89">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Priulla</surname> <given-names>G.</given-names></name> <name><surname>Sammartino</surname> <given-names>G.</given-names></name></person-group> (<year>2020</year>). <source>L&#x00027;abbecedario degli stereotipi di genere (Illustrazioni di M. Banci)</source>. <publisher-loc>Parma</publisher-loc>: <publisher-name>NFC Edizioni</publisher-name>.</mixed-citation>
</ref>
<ref id="B90">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pu</surname> <given-names>S.</given-names></name> <name><surname>Goldberg</surname> <given-names>L.</given-names></name> <name><surname>Ren</surname> <given-names>J.</given-names></name> <name><surname>Goldberg</surname> <given-names>A. C.</given-names></name> <name><surname>Courey</surname> <given-names>M.</given-names></name></person-group> (<year>2025</year>). <article-title>Physical features contributing to gender dysphoria: the role of voice</article-title>. <source>Otolaryngol. Head Neck Surg.</source> <volume>172</volume>, <fpage>2018</fpage>&#x02013;<lpage>2025</lpage>. doi: <pub-id pub-id-type="doi">10.1002/ohn.1207</pub-id><pub-id pub-id-type="pmid">40105482</pub-id></mixed-citation>
</ref>
<ref id="B91">
<mixed-citation publication-type="book"><collab>R Core Team</collab> (<year>2023</year>). <source>R: A Language and Environment for Statistical Computing</source>. <publisher-loc>Vienna</publisher-loc>: <publisher-name>R Foundation for Statistical Computing</publisher-name>.</mixed-citation>
</ref>
<ref id="B92">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Rava</surname> <given-names>C.</given-names></name></person-group> (<year>2012</year>). <source>Un mare di silenzio</source>. <publisher-loc>Milano</publisher-loc>: <publisher-name>Garzanti</publisher-name>.</mixed-citation>
</ref>
<ref id="B93">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Reed</surname> <given-names>M. B.</given-names></name> <name><surname>Handschuh</surname> <given-names>P. A.</given-names></name> <name><surname>Kl&#x000F6;bl</surname> <given-names>M.</given-names></name> <name><surname>Konadu</surname> <given-names>M. E.</given-names></name> <name><surname>Kaufmann</surname> <given-names>U.</given-names></name> <name><surname>Hahn</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>The influence of sex steroid treatment on insular connectivity in gender dysphoria</article-title>. <source>Psychoneuroendocrinology</source> <volume>155</volume>:<fpage>106336</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.psyneuen.2023.106336</pub-id><pub-id pub-id-type="pmid">37499299</pub-id></mixed-citation>
</ref>
<ref id="B94">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ricciardelli</surname> <given-names>L. A.</given-names></name> <name><surname>McCabe</surname> <given-names>M. P.</given-names></name> <name><surname>Williams</surname> <given-names>R. J.</given-names></name> <name><surname>Thompson</surname> <given-names>J. K.</given-names></name></person-group> (<year>2007</year>). <article-title>The role of ethnicity and culture in body image and disordered eating among males</article-title>. <source>Clin. Psychol. Rev.</source> <volume>27</volume>, <fpage>582</fpage>&#x02013;<lpage>606</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cpr.2007.01.016</pub-id><pub-id pub-id-type="pmid">17341436</pub-id></mixed-citation>
</ref>
<ref id="B95">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Romani</surname> <given-names>A.</given-names></name> <name><surname>Mazzoli</surname> <given-names>F.</given-names></name> <name><surname>Ristori</surname> <given-names>J.</given-names></name> <name><surname>Cocchetti</surname> <given-names>C.</given-names></name> <name><surname>Cassioli</surname> <given-names>E.</given-names></name> <name><surname>Castellini</surname> <given-names>G.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Psychological wellbeing and perceived social acceptance in gender diverse individuals</article-title>. <source>J. Sexual Med.</source> <volume>18</volume>, <fpage>1933</fpage>&#x02013;<lpage>1944</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jsxm.2021.08.012</pub-id><pub-id pub-id-type="pmid">34749989</pub-id></mixed-citation>
</ref>
<ref id="B96">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Russell</surname> <given-names>S. T.</given-names></name> <name><surname>Fish</surname> <given-names>J. N.</given-names></name></person-group> (<year>2016</year>). <article-title>Mental health in lesbian, gay, bisexual, and transgender (LGBT) youth</article-title>. <source>Annu. Rev. Clin. Psychol.</source> <volume>12</volume>, <fpage>465</fpage>&#x02013;<lpage>487</lpage>. doi: <pub-id pub-id-type="doi">10.1146/annurev-clinpsy-021815-093153</pub-id><pub-id pub-id-type="pmid">26772206</pub-id></mixed-citation>
</ref>
<ref id="B97">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Scandurra</surname> <given-names>C.</given-names></name> <name><surname>Bochicchio</surname> <given-names>V.</given-names></name> <name><surname>Dolce</surname> <given-names>P.</given-names></name> <name><surname>Carava</surname> <given-names>C.</given-names></name> <name><surname>Vitelli</surname> <given-names>R.</given-names></name> <name><surname>Testa</surname> <given-names>R. J.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>The Italian validation of the gender minority stress and resilience measure</article-title>. <source>Psychol. Sexual Orient. Gender Divers.</source> <volume>7</volume>:<fpage>208</fpage>. doi: <pub-id pub-id-type="doi">10.1037/sgd0000366</pub-id></mixed-citation>
</ref>
<ref id="B98">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schielzeth</surname> <given-names>H.</given-names></name> <name><surname>Dingemanse</surname> <given-names>N. J.</given-names></name> <name><surname>Nakagawa</surname> <given-names>S.</given-names></name> <name><surname>Westneat</surname> <given-names>D. F.</given-names></name> <name><surname>Allegue</surname> <given-names>H.</given-names></name> <name><surname>Teplitsky</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Robustness of linear mixed-effects models to violations of distributional assumptions</article-title>. <source>Methods Ecol. Evol.</source> <volume>11</volume>, <fpage>1141</fpage>&#x02013;<lpage>1152</lpage>. doi: <pub-id pub-id-type="doi">10.1111/2041-210X.13434</pub-id></mixed-citation>
</ref>
<ref id="B99">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sczesny</surname> <given-names>S.</given-names></name> <name><surname>Formanowicz</surname> <given-names>M.</given-names></name> <name><surname>Moser</surname> <given-names>F.</given-names></name></person-group> (<year>2016</year>). <article-title>Can gender-fair language reduce gender stereotyping and discrimination?</article-title> <source>Front. Psychol.</source> <volume>7</volume>:<fpage>25</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2016.00025</pub-id><pub-id pub-id-type="pmid">26869947</pub-id></mixed-citation>
</ref>
<ref id="B100">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Seth</surname> <given-names>A. K.</given-names></name></person-group> (<year>2013</year>). <article-title>Interoceptive inference, emotion, and the embodied self</article-title>. <source>Trends Cogn. Sci.</source> <volume>17</volume>, <fpage>565</fpage>&#x02013;<lpage>573</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.tics.2013.09.007</pub-id><pub-id pub-id-type="pmid">24126130</pub-id></mixed-citation>
</ref>
<ref id="B101">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Skewes</surname> <given-names>L.</given-names></name> <name><surname>Fine</surname> <given-names>C.</given-names></name> <name><surname>Haslam</surname> <given-names>N.</given-names></name></person-group> (<year>2018</year>). <article-title>Beyond Mars and Venus: the role of gender essentialism in support for gender inequality and backlash</article-title>. <source>PLoS ONE</source> <volume>13</volume>:<fpage>e0200921</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0200921</pub-id><pub-id pub-id-type="pmid">30040839</pub-id></mixed-citation>
</ref>
<ref id="B102">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Smeltzer</surname> <given-names>J. C.</given-names></name> <name><surname>Chiou</surname> <given-names>S. H.</given-names></name> <name><surname>Shembel</surname> <given-names>A. C.</given-names></name></person-group> (<year>2023</year>). <article-title>Interoception, voice symptom reporting, and voice disorders</article-title>. <source>J. Voice</source>. <pub-id pub-id-type="pmid">37012093</pub-id></mixed-citation>
</ref>
<ref id="B103">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Steele</surname> <given-names>J. R.</given-names></name> <name><surname>Ambady</surname> <given-names>N.</given-names></name></person-group> (<year>2006</year>). <article-title>&#x0201C;Math is hard!&#x0201D; The effect of gender priming on women&#x00027;s attitudes</article-title>. <source>J. Exp. Soc. Psychol.</source> <volume>42</volume>, <fpage>428</fpage>&#x02013;<lpage>436</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jesp.2005.06.003</pub-id></mixed-citation>
</ref>
<ref id="B104">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Stenfelt</surname> <given-names>S.</given-names></name></person-group> (<year>2016</year>). <article-title>Model predictions for bone conduction perception in the human</article-title>. <source>Hear. Res.</source> <volume>340</volume>, <fpage>135</fpage>&#x02013;<lpage>143</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.heares.2015.10.014</pub-id><pub-id pub-id-type="pmid">26657096</pub-id></mixed-citation>
</ref>
<ref id="B105">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Testa</surname> <given-names>R. J.</given-names></name> <name><surname>Habarth</surname> <given-names>J.</given-names></name> <name><surname>Peta</surname> <given-names>J.</given-names></name> <name><surname>Balsam</surname> <given-names>K.</given-names></name> <name><surname>Bockting</surname> <given-names>W.</given-names></name></person-group> (<year>2015</year>). <article-title>Development of the gender minority stress and resilience measure</article-title>. <source>Psychol. Sexual Orient. Gender Divers.</source> <volume>2</volume>:<fpage>65</fpage>. doi: <pub-id pub-id-type="doi">10.1037/sgd0000081</pub-id></mixed-citation>
</ref>
<ref id="B106">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Thompson</surname> <given-names>A. E.</given-names></name> <name><surname>Voyer</surname> <given-names>D.</given-names></name></person-group> (<year>2014</year>). <article-title>Sex differences in the ability to recognise non-verbal displays of emotion: a meta-analysis</article-title>. <source>Cogn. Emot.</source> <volume>28</volume>, <fpage>1164</fpage>&#x02013;<lpage>1195</lpage>. doi: <pub-id pub-id-type="doi">10.1080/02699931.2013.875889</pub-id><pub-id pub-id-type="pmid">24400860</pub-id></mixed-citation>
</ref>
<ref id="B107">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Thompson</surname> <given-names>J. K.</given-names></name> <name><surname>Roehrig</surname> <given-names>M.</given-names></name> <name><surname>Cafri</surname> <given-names>G.</given-names></name> <name><surname>Heinberg</surname> <given-names>L. J.</given-names></name></person-group> (<year>2005</year>). <article-title>&#x0201C;Assessment of body image disturbance,&#x0201D;</article-title> in <source>Assessment of Eating Disorders</source>, eds. J. E. Mitchell and C. B. Peterson (<publisher-loc>New York, NY</publisher-loc>: <publisher-name>Guilford Press</publisher-name>), <fpage>175</fpage>&#x02013;<lpage>202</lpage>.</mixed-citation>
</ref>
<ref id="B108">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Truszczynski</surname> <given-names>N.</given-names></name> <name><surname>Singh</surname> <given-names>A. A.</given-names></name> <name><surname>Hansen</surname> <given-names>N.</given-names></name></person-group> (<year>2022</year>). <article-title>The discrimination experiences and coping responses of non-binary and trans people</article-title>. <source>J. Homosexual.</source> <volume>69</volume>, <fpage>741</fpage>&#x02013;<lpage>755</lpage>. doi: <pub-id pub-id-type="doi">10.1080/00918369.2020.1855028</pub-id><pub-id pub-id-type="pmid">33331799</pub-id></mixed-citation>
</ref>
<ref id="B109">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tsakiris</surname> <given-names>M.</given-names></name> <name><surname>Jim&#x000E9;nez</surname> <given-names>A. T.</given-names></name> <name><surname>Costantini</surname> <given-names>M.</given-names></name></person-group> (<year>2011</year>). <article-title>Just a heartbeat away from one&#x00027;s body: interoceptive sensitivity predicts malleability of body-representations</article-title>. <source>Proc. R. Soc. B Biol. Sci.</source> <volume>278</volume>, <fpage>2470</fpage>&#x02013;<lpage>2476</lpage>. doi: <pub-id pub-id-type="doi">10.1098/rspb.2010.2547</pub-id><pub-id pub-id-type="pmid">21208964</pub-id></mixed-citation>
</ref>
<ref id="B110">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tsakiris</surname> <given-names>M.</given-names></name> <name><surname>Prabhu</surname> <given-names>G.</given-names></name> <name><surname>Haggard</surname> <given-names>P.</given-names></name></person-group> (<year>2006</year>). <article-title>Having a body versus moving your body: how agency structures body-ownership</article-title>. <source>Conscious. Cogn.</source> <volume>15</volume>, <fpage>423</fpage>&#x02013;<lpage>432</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.concog.2005.09.004</pub-id><pub-id pub-id-type="pmid">16343947</pub-id></mixed-citation>
</ref>
<ref id="B111">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>van de Grift</surname> <given-names>T. C.</given-names></name> <name><surname>Cohen-Kettenis</surname> <given-names>P. T.</given-names></name> <name><surname>Elaut</surname> <given-names>E.</given-names></name> <name><surname>De Cuypere</surname> <given-names>G. R. E. T. A.</given-names></name> <name><surname>Richter-Appelt</surname> <given-names>H.</given-names></name> <name><surname>Haraldsen</surname> <given-names>I. R.</given-names></name> <etal/></person-group>. (<year>2016a</year>). <article-title>A network analysis of body satisfaction of people with gender dysphoria</article-title>. <source>Body Image</source> <volume>17</volume>, <fpage>184</fpage>&#x02013;<lpage>190</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bodyim.2016.04.002</pub-id><pub-id pub-id-type="pmid">27137814</pub-id></mixed-citation>
</ref>
<ref id="B112">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>van de Grift</surname> <given-names>T. C.</given-names></name> <name><surname>Cohen-Kettenis</surname> <given-names>P. T.</given-names></name> <name><surname>Steensma</surname> <given-names>T. D.</given-names></name> <name><surname>De Cuypere</surname> <given-names>G.</given-names></name> <name><surname>Richter-Appelt</surname> <given-names>H.</given-names></name> <name><surname>Haraldsen</surname> <given-names>I. R.</given-names></name> <etal/></person-group>. (<year>2016b</year>). <article-title>Body satisfaction and physical appearance in gender dysphoria</article-title>. <source>Arch. Sexual Behav.</source> <volume>45</volume>, <fpage>575</fpage>&#x02013;<lpage>585</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10508-015-0614-1</pub-id><pub-id pub-id-type="pmid">26474976</pub-id></mixed-citation>
</ref>
<ref id="B113">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Vilhauer</surname> <given-names>R. P.</given-names></name></person-group> (<year>2016</year>). <article-title>Inner reading voices: an overlooked form of inner speech</article-title>. <source>Psychosis</source> <volume>8</volume>, <fpage>37</fpage>&#x02013;<lpage>47</lpage>. doi: <pub-id pub-id-type="doi">10.1080/17522439.2015.1028972</pub-id></mixed-citation>
</ref>
<ref id="B114">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Welch</surname> <given-names>B.</given-names></name> <name><surname>Helou</surname> <given-names>L. B.</given-names></name></person-group> (<year>2022</year>). <article-title>Measuring communicative congruence and communicative dysphoria in a sample of individuals without voice disorders</article-title>. <source>J. Speech Lang. Hear. Res.</source> <volume>65</volume>, <fpage>3420</fpage>&#x02013;<lpage>3437</lpage>. doi: <pub-id pub-id-type="doi">10.1044/2022_JSLHR-21-00459</pub-id><pub-id pub-id-type="pmid">36054879</pub-id></mixed-citation>
</ref>
<ref id="B115">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Wickham</surname> <given-names>H.</given-names></name></person-group> (<year>2016</year>). <source>ggplot2: Elegant Graphics for Data Analysis</source>. <publisher-loc>New York, NY</publisher-loc>: <publisher-name>Springer-Verlag</publisher-name>. doi: <pub-id pub-id-type="doi">10.1007/978-3-319-24277-4</pub-id></mixed-citation>
</ref>
<ref id="B116">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Wickham</surname> <given-names>H.</given-names></name> <name><surname>Fran&#x000E7;ois</surname> <given-names>R.</given-names></name> <name><surname>Henry</surname> <given-names>L.</given-names></name> <name><surname>M&#x000FC;ller</surname> <given-names>K.</given-names></name></person-group> (<year>2023</year>). <source>dplyr: a Grammar of Data Manipulation</source>. R Package Version 1.1.4.</mixed-citation>
</ref>
<ref id="B117">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>M.</given-names></name> <name><surname>Homae</surname> <given-names>F.</given-names></name> <name><surname>Hashimoto</surname> <given-names>R. I.</given-names></name> <name><surname>Hagiwara</surname> <given-names>H.</given-names></name></person-group> (<year>2013</year>). <article-title>Acoustic cues for the recognition of self-voice and other-voice</article-title>. <source>Front. Psychol.</source> <volume>4</volume>:<fpage>735</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2013.00735</pub-id><pub-id pub-id-type="pmid">24133475</pub-id></mixed-citation>
</ref>
<ref id="B118">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yao</surname> <given-names>B.</given-names></name> <name><surname>Belin</surname> <given-names>P.</given-names></name> <name><surname>Scheepers</surname> <given-names>C.</given-names></name></person-group> (<year>2011</year>). <article-title>Silent reading of direct versus indirect speech activates voice-selective areas in the auditory cortex</article-title>. <source>J. Cogn. Neurosci.</source> <volume>23</volume>, <fpage>3146</fpage>&#x02013;<lpage>3152</lpage>. doi: <pub-id pub-id-type="doi">10.1162/jocn_a_00022</pub-id><pub-id pub-id-type="pmid">21452944</pub-id></mixed-citation>
</ref>
<ref id="B119">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zaidel</surname> <given-names>A.</given-names></name> <name><surname>Salomon</surname> <given-names>R.</given-names></name></person-group> (<year>2023</year>). <article-title>Multisensory decisions from self to world</article-title>. <source>Philos. Trans. R. Soc. B</source> <volume>378</volume>:<fpage>20220335</fpage>. doi: <pub-id pub-id-type="doi">10.1098/rstb.2022.0335</pub-id><pub-id pub-id-type="pmid">37545311</pub-id></mixed-citation>
</ref>
<ref id="B120">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ziltzer</surname> <given-names>R. S.</given-names></name> <name><surname>Lett</surname> <given-names>E.</given-names></name> <name><surname>Su-Genyk</surname> <given-names>P.</given-names></name> <name><surname>Chambers</surname> <given-names>T.</given-names></name> <name><surname>Moayer</surname> <given-names>R.</given-names></name></person-group> (<year>2023</year>). <article-title>Needs assessment of gender-affirming face, neck, and voice procedures and the role of gender dysphoria</article-title>. <source>Otolaryngol. Head Neck Surg.</source> <volume>169</volume>, <fpage>906</fpage>&#x02013;<lpage>916</lpage>. doi: <pub-id pub-id-type="doi">10.1002/ohn.329</pub-id><pub-id pub-id-type="pmid">36942914</pub-id></mixed-citation>
</ref>
<ref id="B121">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zimman</surname> <given-names>L.</given-names></name></person-group> (<year>2018</year>). <article-title>Transgender voices: insights on identity, embodiment, and the gender of the voice</article-title>. <source>Lang. Linguist. Compass</source> <volume>12</volume>:<fpage>e12284</fpage>. doi: <pub-id pub-id-type="doi">10.1111/lnc3.12284</pub-id></mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/400868/overview">Marta Calbi</ext-link>, University of Milan, Italy</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/257473/overview">Martina Ardizzi</ext-link>, University of Parma, Italy</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2436895/overview">Greta Riboli</ext-link>, Universit&#x000E0; di Sigmund Freud Milano, Italy</p>
</fn>
</fn-group>
</back>
</article>