<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Virtual Real.</journal-id>
<journal-title>Frontiers in Virtual Reality</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Virtual Real.</abbrev-journal-title>
<issn pub-type="epub">2673-4192</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1470382</article-id>
<article-id pub-id-type="doi">10.3389/frvir.2024.1470382</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Virtual Reality</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Speech-in-noise testing in virtual reality</article-title>
<alt-title alt-title-type="left-running-head">Ram&#xed;rez et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/frvir.2024.1470382">10.3389/frvir.2024.1470382</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Ram&#xed;rez</surname>
<given-names>Melissa</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1753965/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>M&#xfc;ller</surname>
<given-names>Alexander</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Arend</surname>
<given-names>Johannes M.</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1697052/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Himmelein</surname>
<given-names>Hendrik</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Rader</surname>
<given-names>Tobias</given-names>
</name>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1879269/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/resources/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>P&#xf6;rschmann</surname>
<given-names>Christoph</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1760196/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Institute of Computer and Communication Technology</institution>, <institution>TH K&#xf6;ln - University of Applied Sciences</institution>, <addr-line>Cologne</addr-line>, <country>Germany</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Audio Communication Group</institution>, <institution>Technische Universit&#xe4;t Berlin</institution>, <addr-line>Berlin</addr-line>, <country>Germany</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>Acoustics Lab</institution>, <institution>Department of Information and Communications Engineering</institution>, <institution>Aalto University</institution>, <addr-line>Espoo</addr-line>, <country>Finland</country>
</aff>
<aff id="aff4">
<sup>4</sup>
<institution>Department of Otorhinolaryngology</institution>, <institution>Division of Audiology</institution>, <institution>University Hospital</institution>, <institution>Ludwig-Maximilians-University Munich (LMU)</institution>, <addr-line>Munich</addr-line>, <country>Germany</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/885615/overview">Mehdi Abouzari</ext-link>, University of California, Irvine, United States</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1326977/overview">Sasan Dabiri</ext-link>, Northern Ontario School of Medicine University, Canada</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2859179/overview">Euyhyun Park</ext-link>, Korea University, Republic of Korea</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Melissa Ram&#xed;rez, <email>melissa.ramirez@th-koeln.de</email>
</corresp>
</author-notes>
<pub-date pub-type="epub">
<day>07</day>
<month>11</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>5</volume>
<elocation-id>1470382</elocation-id>
<history>
<date date-type="received">
<day>25</day>
<month>07</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>24</day>
<month>10</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2024 Ram&#xed;rez, M&#xfc;ller, Arend, Himmelein, Rader and P&#xf6;rschmann.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Ram&#xed;rez, M&#xfc;ller, Arend, Himmelein, Rader and P&#xf6;rschmann</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>The potential of virtual reality (VR) in supporting hearing research and audiological care has long been recognized. While allowing the creation of experimental settings that closely resemble real-life scenarios and potentially leading to more ecologically valid results, VR could also support the current need for automated or remote assessment of auditory processing abilities in clinical settings. Understanding speech in competing noise is the most common complaint of patients with hearing difficulties, and the need to develop tools that can simplify speech-in-noise testing by reducing the time and resources required while improving the ecological validity of current assessment procedures is an area of great research interest. However, the use of VR for speech-in-noise testing has not yet been widely adopted because it is still unclear whether subjects respond to virtual stimuli the same way as they would in real-life settings. Using headphone-based binaural presentation, delivering visuals through head-mounted displays (HMDs), and using unsupervised (self-testing or remote) procedures are some aspects of virtualization that could potentially affect speech-in-noise measures, and the extent of this potential impact remains unclear. Before virtualization can be considered feasible, its effects on behavioral psychoacoustic measures must be understood. Thus, the ability to reproduce results from typical laboratory and clinical settings in VR environments is a major topic of current research. In this study, we sought to answer whether it is possible to reproduce results from a standard speech-in-noise test using state-of-the-art technology and commercially available VR peripherals. To this end, we compared the results of a well-established speech-in-noise test conducted in a conventional loudspeaker-based laboratory setting with those obtained in three different virtual environments. 
In each environment, we introduced one aspect of virtualization, i.e., virtual audio presentation in the first environment, HMD-based visuals with a visual anchor representing the target speaker in the second, and an alternative feedback- and scoring method allowing unsupervised testing in the last. Our results indicate that the speech-in-noise measures from the loudspeaker-based measurement and those from the virtual scenes were all statistically identical, suggesting that conducting speech-in-noise testing in state-of-the-art VR environments may be feasible even without experimenter supervision.</p>
</abstract>
<kwd-group>
<kwd>binaural hearing</kwd>
<kwd>speech reception thresholds</kwd>
<kwd>spatial release from masking</kwd>
<kwd>virtual reality</kwd>
<kwd>tele-audiology</kwd>
</kwd-group>
<contract-sponsor id="cn001">Bundesministerium f&#xfc;r Bildung und Forschung<named-content content-type="fundref-id">10.13039/501100002347</named-content>
</contract-sponsor>
<contract-sponsor id="cn002">Deutsche Forschungsgemeinschaft<named-content content-type="fundref-id">10.13039/501100001659</named-content>
</contract-sponsor>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Virtual Reality in Medicine</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<p>The cocktail party is a perfect metaphor for the auditory complexity of everyday life (<xref ref-type="bibr" rid="B53">Middlebrooks et al., 2017</xref>). Noisy classrooms, crowded restaurants, and busy offices are just a few examples of typical complex acoustic environments in which our auditory system demonstrates its ability to focus on signals of interest, such as the speech of a particular speaker in the presence of competing speech or background noise (<xref ref-type="bibr" rid="B83">Werner et al., 2012</xref>).</p>
<p>Speech intelligibility in noisy environments relies heavily on binaural processing, and the role of spatial hearing in this ability is well-established in the literature (<xref ref-type="bibr" rid="B34">Hawley et al., 2004</xref>). Speech intelligibility is enhanced when target speech and competing noise are spatially separated (<xref ref-type="bibr" rid="B19">Bronkhorst, 2000</xref>; <xref ref-type="bibr" rid="B29">Dirks and Wilson, 1969</xref>) compared to when they are colocated (<xref ref-type="bibr" rid="B36">Hess et al., 2018</xref>; <xref ref-type="bibr" rid="B65">Peng and Litovsky, 2022</xref>). This enhancement, known as spatial release from masking (SRM), can be measured as the difference in speech reception thresholds (SRTs) between the spatially separated and colocated noise conditions (<xref ref-type="bibr" rid="B31">Garadat et al., 2009</xref>; <xref ref-type="bibr" rid="B34">Hawley et al., 2004</xref>; <xref ref-type="bibr" rid="B51">Litovsky, 2005</xref>).</p>
<p>Previous research has consistently shown that audiograms alone are insufficient to predict speech understanding difficulties in noisy environments or to reveal a person&#x2019;s functional hearing ability in real-life listening scenarios (<xref ref-type="bibr" rid="B73">Ruggles et al., 2011</xref>; <xref ref-type="bibr" rid="B75">Strelcyk and Dau, 2009</xref>). There is a high incidence of hearing difficulties, especially in noisy environments, among patients who do not exhibit measurable hearing threshold loss. This includes individuals with subclinical hearing loss or supra-threshold listening disorders and those with auditory processing disorders (<xref ref-type="bibr" rid="B9">Bellis and Bellis, 2015</xref>; <xref ref-type="bibr" rid="B6">Beck, 2023</xref>). As a result, it has been recommended for over 50 years to include speech-in-noise testing in routine hearing evaluations for all patients (<xref ref-type="bibr" rid="B24">Carhart and Tillman, 1970</xref>), even those with pure-tone normal-hearing (NH) thresholds (<xref ref-type="bibr" rid="B72">Roup et al., 2021</xref>). Speech-in-noise testing provides a more comprehensive understanding of a patient&#x2019;s hearing abilities and facilitates the implementation of more effective treatment strategies. 
However, despite the availability of a wide range of accurate speech-in-noise tests (<xref ref-type="bibr" rid="B10">Bench et al., 1979</xref>; <xref ref-type="bibr" rid="B23">Cameron and Dillon, 2007</xref>; <xref ref-type="bibr" rid="B47">Killion et al., 2004</xref>; <xref ref-type="bibr" rid="B60">Nilsson et al., 1994</xref>; <xref ref-type="bibr" rid="B61">Niquette et al., 2003</xref>; <xref ref-type="bibr" rid="B74">Soli and Wong, 2008</xref>; <xref ref-type="bibr" rid="B76">Taylor, 2003</xref>), recent data indicate that speech-in-noise abilities are still not regularly tested in routine hearing evaluations (<xref ref-type="bibr" rid="B6">Beck, 2023</xref>; <xref ref-type="bibr" rid="B57">Mueller et al., 2023</xref>).</p>
<p>It is a matter of concern that less than 20% of hearing healthcare professionals include speech-in-noise testing in their routine hearing assessments. In most cases, when speech intelligibility measures are included, they are limited to SRTs in quiet (<xref ref-type="bibr" rid="B6">Beck, 2023</xref>). This is primarily attributed to time and resource limitations in the typical clinical practice, including a shortage of healthcare professionals, the unavailability of complex setups such as loudspeaker arrangements in large and acoustically treated rooms (<xref ref-type="bibr" rid="B6">Beck, 2023</xref>; <xref ref-type="bibr" rid="B25">Clark et al., 2017</xref>; <xref ref-type="bibr" rid="B56">Mueller, 2016</xref>; <xref ref-type="bibr" rid="B57">Mueller et al., 2023</xref>), and the perceived lack of <italic>external validity</italic> of some assessment procedures, i.e., the extent to which results are likely to generalize to conditions beyond those in which the data were collected (<xref ref-type="bibr" rid="B7">Beechey, 2002</xref>), also commonly known as <italic>ecological validity</italic> (<xref ref-type="bibr" rid="B44">Keidser et al., 2020</xref>).</p>
<p>In contrast, speech-in-noise abilities have been extensively studied in laboratory-based research contexts, showing that several factors can affect speech intelligibility in NH and hearing-impaired listeners, including the spatial configuration of the sound sources, the acoustic properties of the listening environment, the type of masker (energetic or informational), and the spectral differences between the target and maskers, among others (<xref ref-type="bibr" rid="B2">Arbogast et al., 2005</xref>; <xref ref-type="bibr" rid="B13">Best et al., 2012</xref>; <xref ref-type="bibr" rid="B19">Bronkhorst, 2000</xref>; <xref ref-type="bibr" rid="B46">Kidd et al., 2005</xref>; <xref ref-type="bibr" rid="B68">Rader et al., 2013</xref>). However, laboratory-based studies often lack ecological validity because they are conducted in highly controlled environments that do not reflect real-life listening scenarios (<xref ref-type="bibr" rid="B43">Keidser, 2016</xref>). There is a need to improve ecological validity within behavioral hearing science. Current efforts focus on <italic>realism</italic>, i.e., the extent to which laboratory test conditions resemble those found in the everyday settings of interest (<xref ref-type="bibr" rid="B7">Beechey, 2002</xref>), and recent literature highlights the need to integrate perceptual variables that influence listening behavior in real-life scenarios into research paradigms and methods, such as the inclusion of visual information and the ability to make exploratory head movements (<xref ref-type="bibr" rid="B44">Keidser et al., 2020</xref>; <xref ref-type="bibr" rid="B78">Valzolgher, 2024</xref>).</p>
<p>Consequently, developing tools that can simplify speech-in-noise testing by reducing time and resource requirements (<xref ref-type="bibr" rid="B38">Jakien et al., 2017</xref>) while improving the ecological validity of current assessment procedures has become an area of great current research interest (<xref ref-type="bibr" rid="B43">Keidser, 2016</xref>; <xref ref-type="bibr" rid="B44">Keidser et al., 2020</xref>). With this study, we sought to answer whether speech-in-noise testing in virtual reality (VR) could be a viable solution to these challenges. Modern VR peripherals are affordable and portable devices that could improve clinical efficiency by allowing testing in any room, whether in a clinic or at home. In addition, the latest versions support standalone operation, further facilitating reproducibility and scalability of setups. VR technology has tremendous potential to support tele-audiology, improve the quality of care, and enhance the experience of patients and their families. However, the impact of virtualization on behavioral psychoacoustic measures must be investigated before it can be considered viable. To this end, we conducted a psychoacoustic study evaluating the ability to reproduce speech-in-noise outcomes from a conventional loudspeaker-based test setup using state-of-the-art technology and commercially available VR peripherals.</p>
<p>Virtualizing speech-in-noise testing involves significant modifications to the setups and procedures from the typical clinical practice. Therefore, to determine the potential impact of each of those changes on the test results, we started with a loudspeaker-based measurement setup, which we used as a baseline, and we gradually introduced different aspects of virtualization through three different virtual scenarios:<list list-type="simple">
<list-item>
<p>a) In the first virtual scenario (VR1), we replaced the loudspeaker-based auditory presentation with headphone-based dynamic (i.e., motion-compensated) binaural rendering.</p>
</list-item>
</list>
</p>
<p>Previous research in multimodal perception has highlighted a strong link between binaural cues and self-motion, emphasizing that exploratory head movements play an essential role in spatial auditory perception (<xref ref-type="bibr" rid="B33">Grange and Culling, 2016</xref>; <xref ref-type="bibr" rid="B32">Gaveau et al., 2022</xref>). When head movements are not restricted, listeners tend to turn their heads to increase the target signal level in one ear. This instinctive response often results in an improved signal-to-noise ratio (SNR), leading to improved SRTs (<xref ref-type="bibr" rid="B16">Brimijoin et al., 2012</xref>; <xref ref-type="bibr" rid="B48">Kock, 1950</xref>). Despite this knowledge, current clinical and laboratory practice still uses headphone-based <italic>static</italic> binaural rendering for speech-in-noise testing. Although this approach was introduced to avoid the need for loudspeaker-based setups in acoustically treated rooms and is widely used, it has some limitations. Headphone-based static binaural rendering results in internalized sound images, i.e., they are perceived as being located inside the listener&#x2019;s head (<xref ref-type="bibr" rid="B12">Best et al., 2020</xref>; <xref ref-type="bibr" rid="B15">Brimijoin et al., 2013</xref>). Additionally, static binaural rendering causes the virtual location of the signals to move along with the listener&#x2019;s head movements, which does not mimic real-world listening conditions.</p>
<p>Combining headphone-based binaural rendering with head-tracking, i.e., <italic>dynamic</italic> binaural rendering, overcomes these limitations. It allows the auditory environment to be updated in real-time according to the subject&#x2019;s head movements, increasing both realism and externalization (<xref ref-type="bibr" rid="B8">Begault et al., 2001</xref>; <xref ref-type="bibr" rid="B12">Best et al., 2020</xref>). Thus, aiming to improve the naturalness of testing conditions, we did not limit head movements nor used static binaural rendering in this study. This enriches the complexity of the stimuli by making dynamic binaural cues available and allows people to behave more similarly as they would in the real world when performing the listening task (see <xref ref-type="bibr" rid="B78">Valzolgher (2024)</xref> for a comprehensive review).<list list-type="simple">
<list-item>
<p>b) In the second scenario (VR2), we added (virtual) visual feedback with a visual anchor representing the target speaker in the virtual scene.</p>
</list-item>
</list>
</p>
<p>Vision is another modality influencing auditory spatial perception by aiding externalization and distance estimation (<xref ref-type="bibr" rid="B12">Best et al., 2020</xref>). The presentation of visual information congruent with the auditory environment supports the existence of an externalized sound source (<xref ref-type="bibr" rid="B15">Brimijoin, Boyd, and Akeroyd, 2013</xref>). Moreover, several psychophysical and neurophysiological studies have shown that auditory and visuospatial attention are linked such that when attention in one modality is focused on one location, attention in the other modality is also drawn there (<xref ref-type="bibr" rid="B22">Busse et al., 2005</xref>; <xref ref-type="bibr" rid="B77">Tiippana et al., 2011</xref>). This suggests that using a visual anchor at the location of the target speaker may aid listeners in directing their auditory attention to that location as well.</p>
<p>Although the graphics used in our implementation are still far from realistic, e.g., they do not include the speaker&#x2019;s facial expressions or other aspects that are highly relevant for speech understanding, such as lip movements (<xref ref-type="bibr" rid="B35">Helfer and Freyman, 2005</xref>; <xref ref-type="bibr" rid="B87">Yuan et al., 2021</xref>; <xref ref-type="bibr" rid="B85">Williams et al., 2023</xref>), we argue that the availability of the more reliable (albeit basic) visual information aids the brain in optimally calibrating the associations between auditory cues and spatial locations (<xref ref-type="bibr" rid="B37">Isaiah et al., 2014</xref>; <xref ref-type="bibr" rid="B79">Valzolgher et al., 2020</xref>). More importantly, we argue that a significant increase in the listeners&#x2019; SRTs, when tested in this environment compared to VR1, would reveal an (undesired) effect of presenting visual feedback through a head-mounted display (HMD) for this application. For example, the choice of visuals could increase cognitive load, potentially resulting in poorer performance. See Methods and Discussion for more details.<list list-type="simple">
<list-item>
<p>c) Last, in the third virtual scenario (VR3), we included an alternative feedback- and scoring method for unsupervised testing.</p>
</list-item>
</list>
</p>
<p>Speech intelligibility can be measured using an open- or closed-response set. With an open set (typical of clinical settings), the listener repeats aloud what they hear, and the tester rates these verbal responses as correct or incorrect. In a closed-set (forced-choice task), the listener chooses from a limited number of acceptable response alternatives. The response alternatives are usually presented visually (<xref ref-type="bibr" rid="B21">Buss et al., 2016</xref>). It is worth noting that closed sets generally result in reduced (better) SRTs. This is especially true when the response set contains few phonetically dissimilar alternatives (<xref ref-type="bibr" rid="B21">Buss et al., 2016</xref>; <xref ref-type="bibr" rid="B54">Miller et al., 1951</xref>). However, closed sets could facilitate testing without experimenter supervision, allowing self- or remote testing (<xref ref-type="bibr" rid="B38">Jakien et al., 2017</xref>) and supporting tele-audiology. In this setting, we tested whether allowing the participants to self-record their responses on the gamified HMD-based interface would affect the test scores relative to our baseline measure.</p>
<p>We invited three groups of NH subjects to participate in the study. Using a randomized mixed design, each group was tested by taking the German Hearing in Noise Test (HINT) (<xref ref-type="bibr" rid="B42">Joiko et al., 2021</xref>) in a loudspeaker-based setting (baseline condition) and in one of the three virtual scenarios (VR1, VR2, or VR3) presented via a head-mounted display (HMD) and headphones. The within-subjects conditions, i.e., baseline versus virtual, allowed us to evaluate the effect of using headphone-based dynamic binaural rendering with non-individual head-related transfer functions (HRTFs) compared to a conventional loudspeaker-based setup. The between-subjects conditions, i.e., the different virtual conditions, allowed us to measure the effect of introducing a visual anchor representing the target speaker in the virtual scene and introducing an alternative feedback- and scoring method for unsupervised testing.</p>
</sec>
<sec sec-type="methods" id="s2">
<title>2 Methods</title>
<sec id="s2-1">
<title>2.1 Participants</title>
<p>Forty-five subjects aged 19 to 65 (M &#x3d; 31.5&#xa0;years, Mdn &#x3d; 26&#xa0;years, <inline-formula id="inf1">
<mml:math id="m1">
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
</mml:mrow>
</mml:math>
</inline-formula> &#x3d; 13.43&#xa0;years) voluntarily participated in the study (without compensation). They were engineering students or colleagues from the TH K&#xf6;ln. They all reported no hearing complaints and no history of hearing loss or auditory processing disorders in a questionnaire completed prior to participation in the study. These were used as inclusion criteria. All subjects were native German speakers, and about 42% had previously participated in other listening experiments.</p>
<p>The study was designed following the principles of the Declaration of Helsinki (<xref ref-type="bibr" rid="B86">World Medical Association, 2013</xref>) and the guidelines of the local institutional review board of the Institute of Computer and Communication Technology at the TH K&#xf6;ln. All participants gave written informed consent for their voluntary participation in the study and the later publication of the results. All personal data and experimental results were collected, processed, and archived according to country-specific data protection regulations.</p>
</sec>
<sec id="s2-2">
<title>2.2 Setup and stimuli</title>
<p>The experiment took place in the sound-insulated anechoic chamber of the acoustics laboratory of the TH K&#xf6;ln, which has dimensions of 4.5 &#xd7; 11.7 &#xd7; 2.3&#xa0;m (W&#xd7;D&#xd7;H), a lower cut-off frequency of about 200&#xa0;Hz, and a background noise level of about 20&#xa0;dB(A) SPL. We used three Genelec 8020D loudspeakers for the baseline measurement, i.e., the loudspeaker-based environment, and an Oculus Rift with a pair of Sennheiser HD600 headphones for the virtual environments. An RME Fireface UFXII interface connected to a PC controlled the loudspeakers and headphones.</p>
<p>We used the German hearing in noise test with a male target speaker, which includes twelve phonemically- and difficulty-matched 20-sentence lists and a spectrally matched masker noise. All sentences are four- to six-word-long simple sentences incorporating common nouns and verbs used at the elementary school level (<xref ref-type="bibr" rid="B42">Joiko et al., 2021</xref>).</p>
</sec>
<sec id="s2-3">
<title>2.3 Materials</title>
<sec id="s2-3-1">
<title>2.3.1 Loudspeaker-based environment (baseline)</title>
<p>Three loudspeakers were placed at ear level and 1&#xa0;m from the seated listener. They were placed at 0&#xb0;, 90&#xb0;, and 270&#xb0; azimuth, i.e., front, left, and right directions, respectively, and were visible to the subjects. Participants were asked to repeat what they heard using the standard HINT protocol, using an open-set feedback method (<xref ref-type="bibr" rid="B60">Nilsson, Soli, and Sullivan, 1994</xref>; <xref ref-type="bibr" rid="B74">Soli and Wong, 2008</xref>; <xref ref-type="bibr" rid="B42">Joiko et al., 2021</xref>). The experimenter recorded their responses using a Python application developed specifically for this experiment. More details can be found in the Experimental procedure section.</p>
</sec>
<sec id="s2-3-2">
<title>2.3.2 VR environments</title>
<p>All virtual environments used headphone-based dynamic binaural rendering with head tracking. Sound sources were located at 0&#xb0; and either 90&#xb0; or 270&#xb0; azimuth, depending on the test condition, just like in the loudspeaker-based environment (see Experimental procedure section for more details).</p>
<p>We used the Unity wrapper for the 3D Tune-In Toolkit for dynamic binaural rendering because it is open-source, well-documented, and explicitly designed for hearing research (<xref ref-type="bibr" rid="B27">Cuevas-Rodr&#xed;guez et al., 2019</xref>; <xref ref-type="bibr" rid="B69">Reyes-Lecuona and Picinali, 2022</xref>). For spatialization, we used a full-spherical HRTF set from a Neumann KU100 dummy head in SOFA format (<xref ref-type="bibr" rid="B52">Majdak et al., 2022</xref>), which was measured on a Lebedev grid with 2702 spatial sampling points in the far field (<xref ref-type="bibr" rid="B11">Bernsch&#xfc;tz, 2013</xref>).</p>
<p>In addition, we applied a generic headphone compensation filter to the stimuli (target sentences and masker) to minimize the influence of the headphones used. The filter is based on twelve measurements (putting the headphones on and off the dummy head) to account for re-positioning variability and was designed by regularized inversion of the complex mean of the headphone transfer functions (<xref ref-type="bibr" rid="B49">Lindau and Brinkmann, 2012</xref>) using the implementation of <xref ref-type="bibr" rid="B30">Erbes et al. (2017)</xref>.<list list-type="simple">
<list-item>
<p>a) VR1: Audio-only (AO)</p>
</list-item>
</list>
</p>
<p>There was no visual representation of the target speaker&#x2019;s location in this environment. Instead, a black screen was projected through the HMD (<xref ref-type="fig" rid="F1">Figure 1</xref>). After the stimulus presentation, a visual icon and text appeared on the screen, indicating to the subjects that it was time to repeat what they had heard. The experimenter recorded their responses in the system.<list list-type="simple">
<list-item>
<p>b) VR2: Audiovisual (AV)</p>
</list-item>
</list>
</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Test environments.</p>
</caption>
<graphic xlink:href="frvir-05-1470382-g001.tif"/>
</fig>
<p>We chose an open field as our visual environment because it closely mirrors the acoustic properties of the simulated auditory environment, which was anechoic (<xref ref-type="fig" rid="F1">Figure 1</xref>). Rather than visually modeling the anechoic test room and displaying a &#x201c;virtual experimenter,&#x201d; we chose a simple virtual scene designed to make participants feel as if they were somewhere else so that they could &#x201c;forget&#x201d; that someone was sitting there recording what they said. This approach, as other researchers have reported, increases participant comfort (<xref ref-type="bibr" rid="B59">Murphy, 2017</xref>). Moreover, our goal is to assess the feasibility of conducting virtual and remote testing and, in the future, inside more complex, i.e., realistic scenarios, visually and auditorily. Therefore, we are interested in using visual scenes that look different from a research lab.</p>
<p>We used a robot avatar in front of the subject to represent the target speaker (located at 0&#xb0; azimuth and 1&#xa0;m distance). After the stimulus presentation, the subjects repeated aloud what they had heard, and the experimenter recorded their responses in the system. The <xref ref-type="sec" rid="s12">Supplementary Material</xref> includes a short video illustrating some trials in this environment.<list list-type="simple">
<list-item>
<p>c) VR3: Audiovisual (AV) with word selection:</p>
</list-item>
</list>
</p>
<p>In this environment, otherwise identical to VR2, participants were asked to put together the target sentences word by word. The standard HINT procedure usually features an open set. However, being able to use a closed set may enable unsupervised testing. Thus, we presented five options for each word in a five-alternative forced-choice (5AFC) procedure.</p>
<p>Of these five options, only one was a correct word, while the other four were randomly selected alternatives from the sentence lists that were matched for length and capitalization, as capitalization is important in German for identifying words that are nouns. Participants had to choose each word to form the sentence, one at a time. Each decision about the current word was made before the alternatives for the next word were presented. Going back or changing previous answers was not possible (<xref ref-type="fig" rid="F2">Figure 2</xref>). The <xref ref-type="sec" rid="s12">Supplementary Material</xref> includes a short video illustrating some trials in this environment.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Example of different alternatives for a target sentence using the word selection system. The subjects&#x2019; task was to form the target sentence one word at a time using a five-alternative forced-choice (5AFC) procedure. Of the five alternatives presented, only one was a correct word, while the other four were random alternatives matched for length and capitalization. The options for the first word were presented first, and participants had to select the correct word before being presented with the options for the next word. Once a word was selected, participants could not go back or change their answers.</p>
</caption>
<graphic xlink:href="frvir-05-1470382-g002.tif"/>
</fig>
</sec>
</sec>
<sec id="s2-4">
<title>2.4 Experimental procedure</title>
<p>The HINT procedure includes four conditions where the target speech is always in front of the listener (0&#xb0; azimuth). The noise (masker) is either at the same location, i.e., Noise Front (NF), shifted to the left (90&#xb0; azimuth) or right (270&#xb0; azimuth), i.e., Noise Left and Noise Right (NL and NR), or suppressed, i.e., Quiet (Q) (<xref ref-type="bibr" rid="B42">Joiko et al., 2021</xref>; <xref ref-type="bibr" rid="B55">M&#xf6;nnich et al., 2023</xref>; <xref ref-type="bibr" rid="B60">Nilsson et al., 1994</xref>; <xref ref-type="bibr" rid="B74">Soli and Wong, 2008</xref>). The order in which the noise conditions were presented was randomized, changing each time a list of twenty sentences was completed.</p>
<p>Following the HINT procedure described by <xref ref-type="bibr" rid="B74">Soli and Wong (2008)</xref>, speech and noise were initially presented at 65&#xa0;dB(A) SPL measured in the free field at the listener&#x2019;s position (see Presentation level calibration section for more details). The SNR was automatically adjusted based on the subject&#x2019;s performance using a 50% intelligibility criterion. It decreased if the subject repeated at least half of the sentence correctly, e.g., at least two words in a four-word sentence. Otherwise, it increased. The level of the masker remained constant, and the SNR was adjusted by increasing or decreasing the level of the target sentences. The procedure uses step sizes of 4&#xa0;dB for the first four sentences and 2&#xa0;dB for the remaining 16 sentences (per list). SRTs are calculated by averaging the SNR over sentences 5-20 (including the SNR for a 21st sentence determined from the response to the 20th sentence) for each noise condition (NF, NR, NL, and Q), as described by <xref ref-type="bibr" rid="B74">Soli and Wong (2008)</xref>.</p>
<p>Participants were randomly assigned to one of three groups. Each group underwent the HINT test in the loudspeaker-based environment and in one of the three different VR environments, as follows: <italic>Group A (n &#x3d; 15, mean age of 33.4</italic>&#xa0;<italic>years)</italic> was tested in the loudspeaker-based and VR1 environments, <italic>group B (n &#x3d; 15, mean age of 29.5</italic>&#xa0;<italic>years)</italic> was tested in the loudspeaker-based and VR2 environments, and <italic>group C (n &#x3d; 15, mean age of 31.5</italic>&#xa0;<italic>years)</italic> in the loudspeaker-based and VR3 environments.</p>
<p>The order in which subjects took the test in both environments (loudspeaker-based or virtual) was randomized across participants. Each participant completed four sentence lists (80 sentences) per test environment (160 sentences in total). No sentence or list was repeated per participant.</p>
<p>Before each test (loudspeaker-based or virtual), participants were informed that their task was to repeat aloud (or log into the system for VR3) what they heard. They were informed that they did not have to keep their head still (as in many laboratory-based listening experiments) and that dynamic binaural rendering was available in the headphone-based conditions. So they knew that they could move their head naturally if they wanted to, as in the loudspeaker-based condition. However, they were not given any verbal or written instructions about optimal head orientation strategies to preserve the undirected nature of the behavioral experiment concerning head movements.</p>
<p>After receiving instructions, participants had the opportunity to complete a practice run (5 sentences) in a random test condition (NL, NR, NF, or Q) to familiarize themselves with the test environment. This was followed by time to ask any questions they had before the test began. All subjects were given a 10&#x2013;15&#xa0;min break between tests.</p>
<p>Both tests took place in the same room (the anechoic chamber of the acoustics laboratory of the TH K&#xf6;ln). Participants sat in the same chair in the middle of the loudspeaker-based setup. The only difference between test conditions was whether the auditory presentation was loudspeaker-based or headphone-based and whether the subjects wore the HMD or not.</p>
</sec>
<sec id="s2-5">
<title>2.5 Presentation level calibration</title>
<p>First, we adjusted the presentation level for the loudspeaker-based condition. We played the masker noise on each loudspeaker and adjusted their level independently until the free-field sound pressure level at the listener&#x2019;s position was 65&#xa0;dB(A). Then, for the headphone-based presentation, i.e., all VR conditions, we placed the dummy head (Neumann KU100) in the listener&#x2019;s position, played the same stimuli on the central loudspeaker (0&#xb0; azimuth), and measured the electrical level produced at the dummy head&#x2019;s ears. Subsequently, the headphones were placed on the ears of the dummy head, and the same stimuli (noise signal from 0&#xb0; azimuth) were played again via binaural rendering to adjust the headphone presentation level to the same electrical level.</p>
</sec>
<sec id="s2-6">
<title>2.6 Parameters and statistical analysis</title>
<p>Individuals may have different SRTs between the NL and NR test conditions. This difference may be due to (common) asymmetries between left and right hearing thresholds, cochlear function, neural processing, or head orientation strategies. Notably, in our study, these differences did not exceed <inline-formula id="inf2">
<mml:math id="m2">
<mml:mrow>
<mml:mn>1</mml:mn>
<mml:mtext>&#x2009;dB</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>, as we tested only NH adults. Thus, to simplify the results&#x2019; interpretation, we averaged each participant&#x2019;s SRTs from the NL and NR conditions. This resulted in one outcome measure for the spatially separated test conditions (NL and NR averaged) and one for the colocated noise condition (NF).</p>
<p>Subsequently, we calculated the SRM as the difference between the SRTs in the spatially separated and colocated noise conditions (<xref ref-type="disp-formula" rid="e1">Equation 1</xref>).<disp-formula id="e1">
<mml:math id="m3">
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>R</mml:mi>
<mml:mi>M</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>R</mml:mi>
<mml:mi>T</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mi>L</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>R</mml:mi>
<mml:mi>T</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mi>R</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mn>2</mml:mn>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>S</mml:mi>
<mml:mi>R</mml:mi>
<mml:mi>T</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
<mml:mi>F</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
<label>(1)</label>
</disp-formula>
</p>
<p>For statistical analysis, we performed a Bayesian repeated measures analysis of variance (ANOVA) with default priors (r scale fixed effects of <inline-formula id="inf3">
<mml:math id="m4">
<mml:mrow>
<mml:mo>.</mml:mo>
<mml:mn>5</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>, r scale random effects of <inline-formula id="inf4">
<mml:math id="m5">
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>) for SRM (which includes NL, NR, and NF noise conditions) and for the SRTs in quiet with the within-subjects factor <monospace>environment[loudspeaker-based, virtual]</monospace> and the between-subjects factor <monospace>group [A, B, C]</monospace>(<xref ref-type="bibr" rid="B45">Keysers, et al., 2020</xref>; <xref ref-type="bibr" rid="B70">Rouder et al., 2012</xref>).</p>
<p>We performed <italic>post hoc</italic> testing through individual comparisons based on the default t-test with a Cauchy <inline-formula id="inf5">
<mml:math id="m6">
<mml:mrow>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mn>0</mml:mn>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="normal">r</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mo>.</mml:mo>
<mml:mn>707</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> prior (<xref ref-type="bibr" rid="B71">Rouder et al., 2009</xref>; <xref ref-type="bibr" rid="B80">Wagenmakers, 2007</xref>) and corrected for multiple testing by fixing to <inline-formula id="inf6">
<mml:math id="m7">
<mml:mrow>
<mml:mo>.</mml:mo>
<mml:mn>5</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> the prior probability that the null hypothesis holds across all comparisons (<xref ref-type="bibr" rid="B84">Westfall et al., 1997</xref>). All statistical analyses were performed using the Bayesian Repeated Measures ANOVA module of the Jamovi software package (<xref ref-type="bibr" rid="B39">Jamovi, 2022</xref>).</p>
</sec>
</sec>
<sec sec-type="results" id="s3">
<title>3 Results</title>
<sec id="s3-1">
<title>3.1 SRTs in competing noise</title>
<p>
<xref ref-type="fig" rid="F3">Figure 3</xref> shows the calculated SRTs as a function of the spatially separated and colocated noise conditions for all groups in both loudspeaker-based and virtual environments. The box plots show the individual SRTs per participant as points (with a horizontal offset for better readability).</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Speech reception thresholds (SRTs) as a function of noise condition (spatially separated and colocated) per test group and test environment. The individual SRTs per participant are shown as points per test condition. The boxes represent the (across participants) interquartile range (IQR), the means are shown as white points, and the medians are shown as solid black lines. The whiskers display 1.5 &#xd7; IQR below the 25th or above the 75th percentile, and asterisks indicate outliers beyond that range.</p>
</caption>
<graphic xlink:href="frvir-05-1470382-g003.tif"/>
</fig>
<p>The mean SRTs for the spatially separated noise conditions range from <inline-formula id="inf7">
<mml:math id="m8">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>14.6</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> to <inline-formula id="inf8">
<mml:math id="m9">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>14.0</mml:mn>
<mml:mtext>&#x2009;dB&#x2009;SNR</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula> across groups and test environments, while the mean SRTs for the colocated noise conditions range from <inline-formula id="inf9">
<mml:math id="m10">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>6.0</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> to <inline-formula id="inf10">
<mml:math id="m11">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>5.7</mml:mn>
<mml:mtext>&#x2009;dB&#x2009;SNR</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
<p>The variance in the results from the VR1 environment (Group A) is noticeably higher than the variance in the other groups. In particular, it is more pronounced towards lower (better) SRTs in the VR1 environment than in the loudspeaker-based baseline within the same group. This is the case for six of the fifteen participants in this group (40%) and holds for the same participants in both the spatially separated and the colocated noise conditions.</p>
<p>The effect may be due to the choice of visual feedback provided in the VR1 environment, as the auditory headphone-based presentation was the same in all other virtual environments, and this is not seen in the within-subjects comparison (against the loudspeaker-based baseline in the same group). One possible explanation is that the lack of visual feedback may have benefited some participants by allowing them to focus more on the auditory task, similar to closing their eyes and paying more attention to the auditory stimuli. This may have resulted in an advantage (compared to the loudspeaker-based measurement) where they could see the anechoic room, the loudspeaker array, and the experimenter, which may have distracted them and influenced their responses. However, as this effect appears to be homogeneous for both spatially separated and colocated noise conditions, it does not significantly affect our main outcome measure, the SRM values.</p>
<p>
<xref ref-type="sec" rid="s12">Supplementary Figure S1</xref> shows the raw SRTs before averaging NL and NR conditions.</p>
</sec>
<sec id="s3-2">
<title>3.2 SRM</title>
<p>The data presented in <xref ref-type="fig" rid="F4">Figure 4</xref> shows the calculated SRM values for each subject per test environment and group. The trend lines at the bottom of the figure show individual performance trends across test environments. The line colors indicate an improvement (green) or deterioration (red) in SRM in the virtual environment compared to the baseline measurement in the loudspeaker-based environment.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Spatial release from masking (SRM) as a function of test environment per group. The individual SRM values per participant are shown as points per test condition. Top: The boxes represent the (across participants) interquartile range (IQR). The means are shown as white points, and the medians are shown as solid black lines. The whiskers display 1.5 &#xd7; IQR below the 25th or above the 75th percentile, and asterisks indicate outliers beyond that range. Bottom: Trend lines connect the results per participant. The color of the line indicates improved (in green) or deteriorated (in red) SRM in the virtual environment compared to the baseline measurement in the loudspeaker-based environment.</p>
</caption>
<graphic xlink:href="frvir-05-1470382-g004.tif"/>
</fig>
<p>The means of SRM across all groups and test environments range from <inline-formula id="inf11">
<mml:math id="m12">
<mml:mrow>
<mml:mn>8.4</mml:mn>
<mml:mtext>&#x2009;dB</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula> to <inline-formula id="inf12">
<mml:math id="m13">
<mml:mrow>
<mml:mn>9.1</mml:mn>
<mml:mtext>&#x2009;dB</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
<p>The Bayesian repeated measures ANOVA for SRM with default priors showed that the predictive performance <inline-formula id="inf13">
<mml:math id="m14">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mrow>
<mml:mfenced open="|" close="" separators="&#x7c;">
<mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> of the null model was higher than the predictive performance of all the rival models with and without each factor and their interaction (<xref ref-type="table" rid="T1">Table 1</xref>).</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Bayesian repeated measures ANOVA: Model Comparison - SRM.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Models</th>
<th align="center">
<inline-formula id="inf14">
<mml:math id="m15">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>M</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf15">
<mml:math id="m16">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mrow>
<mml:mfenced open="|" close="" separators="&#x7c;">
<mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf16">
<mml:math id="m17">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>B</mml:mi>
<mml:mi>F</mml:mi>
</mml:mrow>
<mml:mi>M</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf17">
<mml:math id="m18">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>B</mml:mi>
<mml:mi>F</mml:mi>
</mml:mrow>
<mml:mn>01</mml:mn>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf18">
<mml:math id="m19">
<mml:mrow>
<mml:mi>e</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="&#x7c;">
<mml:mrow>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">
<inline-formula id="inf19">
<mml:math id="m20">
<mml:mrow>
<mml:mtext>Null&#x2009;model</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf20">
<mml:math id="m21">
<mml:mrow>
<mml:mn>0.200</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf21">
<mml:math id="m22">
<mml:mrow>
<mml:mn>0.60490</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf22">
<mml:math id="m23">
<mml:mrow>
<mml:mn>6.1241</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf23">
<mml:math id="m24">
<mml:mrow>
<mml:mn>1.00</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left"/>
</tr>
<tr>
<td align="center">
<inline-formula id="inf24">
<mml:math id="m25">
<mml:mrow>
<mml:mtext>Environment</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf25">
<mml:math id="m26">
<mml:mrow>
<mml:mn>0.200</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf26">
<mml:math id="m27">
<mml:mrow>
<mml:mn>0.21322</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf27">
<mml:math id="m28">
<mml:mrow>
<mml:mn>1.0840</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf28">
<mml:math id="m29">
<mml:mrow>
<mml:mn>2.84</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf29">
<mml:math id="m30">
<mml:mrow>
<mml:mn>0.924</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf30">
<mml:math id="m31">
<mml:mrow>
<mml:mtext>Group</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf31">
<mml:math id="m32">
<mml:mrow>
<mml:mn>0.200</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf32">
<mml:math id="m33">
<mml:mrow>
<mml:mn>0.12802</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf33">
<mml:math id="m34">
<mml:mrow>
<mml:mn>0.5873</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf34">
<mml:math id="m35">
<mml:mrow>
<mml:mn>4.72</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf35">
<mml:math id="m36">
<mml:mrow>
<mml:mn>0.833</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf36">
<mml:math id="m37">
<mml:mrow>
<mml:mtext>Environment</mml:mtext>
<mml:mo>&#x2b;</mml:mo>
<mml:mi mathvariant="normal">G</mml:mi>
<mml:mtext>roup</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf37">
<mml:math id="m38">
<mml:mrow>
<mml:mn>0.200</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf38">
<mml:math id="m39">
<mml:mrow>
<mml:mn>0.04525</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf39">
<mml:math id="m40">
<mml:mrow>
<mml:mn>0.1896</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf40">
<mml:math id="m41">
<mml:mrow>
<mml:mn>13.37</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf41">
<mml:math id="m42">
<mml:mrow>
<mml:mn>1.271</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf42">
<mml:math id="m43">
<mml:mrow>
<mml:mtext>Environment</mml:mtext>
<mml:mo>&#x2b;</mml:mo>
<mml:mi mathvariant="normal">G</mml:mi>
<mml:mtext>roup</mml:mtext>
<mml:mo>&#x2b;</mml:mo>
<mml:mtext>Environment</mml:mtext>
<mml:mo>&#x2217;</mml:mo>
<mml:mtext>Group</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf43">
<mml:math id="m44">
<mml:mrow>
<mml:mn>0.200</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf44">
<mml:math id="m45">
<mml:mrow>
<mml:mn>0.00860</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf45">
<mml:math id="m46">
<mml:mrow>
<mml:mn>0.0347</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf46">
<mml:math id="m47">
<mml:mrow>
<mml:mn>70.33</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf47">
<mml:math id="m48">
<mml:mrow>
<mml:mn>1.727</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>There is positive (moderate) evidence that the null model is more likely than the models including the factor <monospace>group</monospace>
<inline-formula id="inf48">
<mml:math id="m49">
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mtext>BF</mml:mtext>
<mml:mn>01</mml:mn>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>4.72</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</inline-formula>
<monospace>,</monospace> and strong evidence of the absence of an effect of both factors <monospace>[environment &#x2b; group]</monospace> <inline-formula id="inf49">
<mml:math id="m50">
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mtext>BF</mml:mtext>
<mml:mn>01</mml:mn>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>13.37</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</inline-formula> and both factors and their interaction <monospace>[environment &#x2b; group &#x2b; environment&#x2a;group]</monospace> <inline-formula id="inf50">
<mml:math id="m51">
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mtext>BF</mml:mtext>
<mml:mn>01</mml:mn>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>70.33</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</inline-formula>. However, the null model is only <inline-formula id="inf51">
<mml:math id="m52">
<mml:mrow>
<mml:mn>2.84</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula> times more likely than those including the factor <monospace>environment</monospace>, and even though the data tends to prove the absence of an effect of <monospace>environment</monospace>, it is still inconclusive <inline-formula id="inf52">
<mml:math id="m53">
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mfrac bevelled="true">
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x3c;</mml:mo>
<mml:mi>B</mml:mi>
<mml:mi>F</mml:mi>
</mml:mrow>
<mml:mn>01</mml:mn>
</mml:msub>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</inline-formula> (<xref ref-type="bibr" rid="B45">Keysers, et al., 2020</xref>).</p>
<p>The analysis of effects across all models, however, reveals evidence of the absence of an effect of environment, group, and their interaction (all <inline-formula id="inf53">
<mml:math id="m54">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>B</mml:mi>
<mml:mi>F</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3c;</mml:mo>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mfrac bevelled="true">
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:math>
</inline-formula> ) (<xref ref-type="table" rid="T2">Table 2</xref>), and a <italic>post hoc</italic> pairwise comparison confirms evidence for the absence of an effect of test environment in SRM <inline-formula id="inf54">
<mml:math id="m55">
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mtext>BF</mml:mtext>
<mml:mn>01</mml:mn>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>3.84</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</inline-formula> (<xref ref-type="table" rid="T3">Table 3</xref>).</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>Analysis of effects - SRM.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Effects</th>
<th align="center">
<inline-formula id="inf55">
<mml:math id="m56">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf56">
<mml:math id="m57">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>l</mml:mi>
<mml:mrow>
<mml:mfenced open="|" close="" separators="&#x7c;">
<mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf57">
<mml:math id="m58">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>B</mml:mi>
<mml:mi>F</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">
<inline-formula id="inf58">
<mml:math id="m59">
<mml:mrow>
<mml:mtext>Environment</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf59">
<mml:math id="m60">
<mml:mrow>
<mml:mn>0.600</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf60">
<mml:math id="m61">
<mml:mrow>
<mml:mn>0.26707</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf61">
<mml:math id="m62">
<mml:mrow>
<mml:mn>0.2429</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf62">
<mml:math id="m63">
<mml:mrow>
<mml:mtext>Group</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf63">
<mml:math id="m64">
<mml:mrow>
<mml:mn>0.600</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf64">
<mml:math id="m65">
<mml:mrow>
<mml:mn>0.18187</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf65">
<mml:math id="m66">
<mml:mrow>
<mml:mn>0.1482</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf66">
<mml:math id="m67">
<mml:mrow>
<mml:mtext>Environment</mml:mtext>
<mml:mo>&#x2217;</mml:mo>
<mml:mi mathvariant="normal">G</mml:mi>
<mml:mtext>roup</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf67">
<mml:math id="m68">
<mml:mrow>
<mml:mn>0.200</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf68">
<mml:math id="m69">
<mml:mrow>
<mml:mn>0.00860</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf69">
<mml:math id="m70">
<mml:mrow>
<mml:mn>0.0347</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>Posthoc comparison &#x2013; Environment (SRM).</p>
</caption>
<table>
<thead valign="top">
<tr>
<th colspan="2" align="center"/>
<th align="center">Prior Odds</th>
<th align="center">Posterior Odds</th>
<th align="center">
<inline-formula id="inf70">
<mml:math id="m71">
<mml:mrow>
<mml:msub>
<mml:mtext>BF</mml:mtext>
<mml:mn>01</mml:mn>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf71">
<mml:math id="m72">
<mml:mrow>
<mml:mi>e</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="&#x7c;">
<mml:mrow>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">Loudspeaker-based</td>
<td align="center">Virtual</td>
<td align="center">
<inline-formula id="inf72">
<mml:math id="m73">
<mml:mrow>
<mml:mn>1.00</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf73">
<mml:math id="m74">
<mml:mrow>
<mml:mn>3.84</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf74">
<mml:math id="m75">
<mml:mrow>
<mml:mn>3.84</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf75">
<mml:math id="m76">
<mml:mrow>
<mml:mn>0.0486</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Since the statistical analysis revealed no effect of group, we show the pooled data across groups in <xref ref-type="fig" rid="F5">Figure 5</xref> to provide a clearer visualization of the similarity between the SRM measures across test environments. The average SRM across subjects <inline-formula id="inf76">
<mml:math id="m77">
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>n</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>45</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</inline-formula> when tested in a conventional loudspeaker-based system was <inline-formula id="inf77">
<mml:math id="m78">
<mml:mrow>
<mml:mn>8.5</mml:mn>
<mml:mtext>&#x2009;dB</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>. In comparison, the average SRM increased slightly to <inline-formula id="inf78">
<mml:math id="m79">
<mml:mrow>
<mml:mn>8.8</mml:mn>
<mml:mtext>&#x2009;dB</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula> when subjects were tested in virtual environments.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>Spatial release from masking (SRM) as a function of test environment (pooled over groups). The individual SRM values per participant are shown as points per test condition. Left: The boxes represent the (across participants) interquartile range (IQR), the means are shown as white points, and the medians are shown as solid black lines. The whiskers display 1.5 &#xd7; IQR below the 25th or above the 75th percentile, and asterisks indicate outliers beyond that range. Right: Trend lines connect the results per participant. The color of the line indicates improved (in green) or deteriorated (in red) SRM in the virtual environment compared to the baseline measurement in the loudspeaker-based environment.</p>
</caption>
<graphic xlink:href="frvir-05-1470382-g005.tif"/>
</fig>
</sec>
<sec id="s3-3">
<title>3.3 SRTs in quiet</title>
<p>
<xref ref-type="fig" rid="F6">Figure 6</xref> displays the SRTs in quiet for each group and test environment in dB(A) SPL, i.e., free-field sound pressure level at the listener&#x2019;s position. The means of SRTs in quiet across all groups and test environments range from <inline-formula id="inf79">
<mml:math id="m80">
<mml:mrow>
<mml:mn>13.4</mml:mn>
<mml:mtext>&#x2009;dB</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi mathvariant="normal">A</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> to <inline-formula id="inf80">
<mml:math id="m81">
<mml:mrow>
<mml:mn>16.2</mml:mn>
<mml:mtext>&#x2009;dB</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi mathvariant="normal">A</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula> SPL.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>Speech reception thresholds (SRTs) in quiet per test group and environment. The individual SRTs per participant are shown as points per test condition. The boxes represent the (across participants) interquartile range (IQR), the means are shown as white points, and the medians are shown as solid black lines. The whiskers display 1.5 &#xd7; IQR below the 25th or above the 75th percentile, and asterisks indicate outliers beyond that range.</p>
</caption>
<graphic xlink:href="frvir-05-1470382-g006.tif"/>
</fig>
<p>The Bayesian repeated measures ANOVA for the SRTs in quiet revealed positive (moderate) evidence in favor of the absence of an effect of <monospace>environment</monospace>
<inline-formula id="inf81">
<mml:math id="m82">
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mtext>BF</mml:mtext>
<mml:mn>01</mml:mn>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>3.665</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</inline-formula> and in favor of the absence of an effect of both factors <monospace>[environment &#x2b; group]</monospace> <inline-formula id="inf82">
<mml:math id="m83">
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mtext>BF</mml:mtext>
<mml:mn>01</mml:mn>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>8.341</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</inline-formula>. However, for the models including the factor <monospace>group</monospace> and the full model (including both factors and their interaction), the evidence is too weak to be conclusive, i.e., there is an absence of evidence <inline-formula id="inf83">
<mml:math id="m84">
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mfrac bevelled="true">
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x3c;</mml:mo>
<mml:mi>B</mml:mi>
<mml:mi>F</mml:mi>
</mml:mrow>
<mml:mn>01</mml:mn>
</mml:msub>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</inline-formula> (<xref ref-type="table" rid="T4">Table 4</xref>).</p>
<table-wrap id="T4" position="float">
<label>TABLE 4</label>
<caption>
<p>Bayesian ANOVA: Model Comparison &#x2013; SRTs in quiet.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Models</th>
<th align="center">
<inline-formula id="inf84">
<mml:math id="m85">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>M</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf85">
<mml:math id="m86">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>M</mml:mi>
<mml:mrow>
<mml:mfenced open="|" close="" separators="&#x7c;">
<mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf86">
<mml:math id="m87">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>B</mml:mi>
<mml:mi>F</mml:mi>
</mml:mrow>
<mml:mi>M</mml:mi>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf87">
<mml:math id="m88">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>B</mml:mi>
<mml:mi>F</mml:mi>
</mml:mrow>
<mml:mn>01</mml:mn>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf88">
<mml:math id="m89">
<mml:mrow>
<mml:mi>e</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="&#x7c;">
<mml:mrow>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">
<inline-formula id="inf89">
<mml:math id="m90">
<mml:mrow>
<mml:mtext>Null&#x2009;model</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf90">
<mml:math id="m91">
<mml:mrow>
<mml:mn>0.200</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf91">
<mml:math id="m92">
<mml:mrow>
<mml:mn>0.3029</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf92">
<mml:math id="m93">
<mml:mrow>
<mml:mn>1.738</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf93">
<mml:math id="m94">
<mml:mrow>
<mml:mn>1.000</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="left"/>
</tr>
<tr>
<td align="center">
<inline-formula id="inf94">
<mml:math id="m95">
<mml:mrow>
<mml:mtext>Environment</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf95">
<mml:math id="m96">
<mml:mrow>
<mml:mn>0.200</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf96">
<mml:math id="m97">
<mml:mrow>
<mml:mn>0.0827</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf97">
<mml:math id="m98">
<mml:mrow>
<mml:mn>0.360</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf98">
<mml:math id="m99">
<mml:mrow>
<mml:mn>3.665</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf99">
<mml:math id="m100">
<mml:mrow>
<mml:mn>1.09</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf100">
<mml:math id="m101">
<mml:mrow>
<mml:mtext>Group</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf101">
<mml:math id="m102">
<mml:mrow>
<mml:mn>0.200</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf102">
<mml:math id="m103">
<mml:mrow>
<mml:mn>0.1356</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf103">
<mml:math id="m104">
<mml:mrow>
<mml:mn>0.627</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf104">
<mml:math id="m105">
<mml:mrow>
<mml:mn>2.234</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf105">
<mml:math id="m106">
<mml:mrow>
<mml:mn>1.43</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf106">
<mml:math id="m107">
<mml:mrow>
<mml:mtext>Environment</mml:mtext>
<mml:mo>&#x2b;</mml:mo>
<mml:mtext>Group</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf107">
<mml:math id="m108">
<mml:mrow>
<mml:mn>0.200</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf108">
<mml:math id="m109">
<mml:mrow>
<mml:mn>0.0363</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf109">
<mml:math id="m110">
<mml:mrow>
<mml:mn>0.151</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf110">
<mml:math id="m111">
<mml:mrow>
<mml:mn>8.341</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf111">
<mml:math id="m112">
<mml:mrow>
<mml:mn>1.11</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf112">
<mml:math id="m113">
<mml:mrow>
<mml:mtext>Environment</mml:mtext>
<mml:mo>&#x2b;</mml:mo>
<mml:mtext>Group</mml:mtext>
<mml:mo>&#x2b;</mml:mo>
<mml:mtext>Environment</mml:mtext>
<mml:mo>&#x2217;</mml:mo>
<mml:mtext>Group</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf113">
<mml:math id="m114">
<mml:mrow>
<mml:mn>0.200</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf114">
<mml:math id="m115">
<mml:mrow>
<mml:mn>0.4426</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf115">
<mml:math id="m116">
<mml:mrow>
<mml:mn>3.176</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf116">
<mml:math id="m117">
<mml:mrow>
<mml:mn>0.684</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf117">
<mml:math id="m118">
<mml:mrow>
<mml:mn>1.39</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The analysis of effects across matched models provides further evidence that the null model is twelve times more likely than those including the interaction between test <monospace>environment</monospace> and <monospace>group</monospace> <inline-formula id="inf133">
<mml:math id="m134">
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mtext>BF</mml:mtext>
<mml:mtext>incl</mml:mtext>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>12.187</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</inline-formula>, confirms the absence of an effect of <monospace>environment</monospace> <inline-formula id="inf134">
<mml:math id="m135">
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mtext>BF</mml:mtext>
<mml:mtext>incl</mml:mtext>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>0.271</mml:mn>
<mml:mo>&#x3c;</mml:mo>
<mml:mfrac bevelled="true">
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</inline-formula>, but remains inconclusive regarding the factor <monospace>group</monospace> <inline-formula id="inf135">
<mml:math id="m136">
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mfrac bevelled="true">
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x3c;</mml:mo>
<mml:mi>B</mml:mi>
<mml:mi>F</mml:mi>
</mml:mrow>
<mml:mn>01</mml:mn>
</mml:msub>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</inline-formula> (<xref ref-type="table" rid="T5">Table 5</xref>).</p>
<table-wrap id="T5" position="float">
<label>TABLE 5</label>
<caption>
<p>Analysis of effects &#x2013; SRTs in quiet.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Effects</th>
<th align="center">
<inline-formula id="inf118">
<mml:math id="m119">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf119">
<mml:math id="m120">
<mml:mrow>
<mml:mi>P</mml:mi>
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>l</mml:mi>
<mml:mrow>
<mml:mfenced open="|" close="" separators="&#x7c;">
<mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>a</mml:mi>
<mml:mi>t</mml:mi>
<mml:mi>a</mml:mi>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf120">
<mml:math id="m121">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>B</mml:mi>
<mml:mi>F</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mi>n</mml:mi>
<mml:mi>c</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">
<inline-formula id="inf121">
<mml:math id="m122">
<mml:mtext>Environment</mml:mtext>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf122">
<mml:math id="m123">
<mml:mrow>
<mml:mn>0.400</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf123">
<mml:math id="m124">
<mml:mrow>
<mml:mn>0.119</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf124">
<mml:math id="m125">
<mml:mrow>
<mml:mn>0.271</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf125">
<mml:math id="m126">
<mml:mtext>Group</mml:mtext>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf126">
<mml:math id="m127">
<mml:mrow>
<mml:mn>0.400</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf127">
<mml:math id="m128">
<mml:mrow>
<mml:mn>0.172</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf128">
<mml:math id="m129">
<mml:mrow>
<mml:mn>0.446</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">
<inline-formula id="inf129">
<mml:math id="m130">
<mml:mrow>
<mml:mtext>Environment</mml:mtext>
<mml:mo>&#x2217;</mml:mo>
<mml:mtext>Group</mml:mtext>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf130">
<mml:math id="m131">
<mml:mrow>
<mml:mn>0.200</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf131">
<mml:math id="m132">
<mml:mrow>
<mml:mn>0.443</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf132">
<mml:math id="m133">
<mml:mrow>
<mml:mn>12.187</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Post hoc pairwise comparisons confirm evidence for the absence of an effect of test environment <inline-formula id="inf136">
<mml:math id="m137">
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mtext>BF</mml:mtext>
<mml:mn>01</mml:mn>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>4.78</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</inline-formula> (<xref ref-type="table" rid="T6">Table 6</xref>) but remain inconclusive for all pairwise comparisons between groups <inline-formula id="inf137">
<mml:math id="m138">
<mml:mrow>
<mml:mfenced open="(" close=")" separators="&#x7c;">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mfrac bevelled="true">
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x3c;</mml:mo>
<mml:mtext>BF</mml:mtext>
</mml:mrow>
<mml:mn>01</mml:mn>
</mml:msub>
<mml:mo>&#x3c;</mml:mo>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:math>
</inline-formula> (<xref ref-type="table" rid="T7">Table 7</xref>).</p>
<table-wrap id="T6" position="float">
<label>TABLE 6</label>
<caption>
<p>Posthoc comparison &#x2013; Environment (SRTs in quiet).</p>
</caption>
<table>
<thead valign="top">
<tr>
<th colspan="2" align="center"/>
<th align="center">Prior Odds</th>
<th align="center">Posterior Odds</th>
<th align="center">
<inline-formula id="inf138">
<mml:math id="m139">
<mml:mrow>
<mml:msub>
<mml:mtext>BF</mml:mtext>
<mml:mn>01</mml:mn>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf139">
<mml:math id="m140">
<mml:mrow>
<mml:mi>e</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="&#x7c;">
<mml:mrow>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">Loudspeaker-based</td>
<td align="center">Virtual</td>
<td align="center">
<inline-formula id="inf140">
<mml:math id="m141">
<mml:mrow>
<mml:mn>1.00</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf141">
<mml:math id="m142">
<mml:mrow>
<mml:mn>4.78</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf142">
<mml:math id="m143">
<mml:mrow>
<mml:mn>4.78</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf143">
<mml:math id="m144">
<mml:mrow>
<mml:mn>0.0539</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
</tbody>
</table>
</table-wrap>
<table-wrap id="T7" position="float">
<label>TABLE 7</label>
<caption>
<p>Posthoc comparisons &#x2013; Groups (SRTs in quiet).</p>
</caption>
<table>
<thead valign="top">
<tr>
<th colspan="2" align="center"/>
<th align="center">Prior Odds</th>
<th align="center">Posterior Odds</th>
<th align="center">
<inline-formula id="inf144">
<mml:math id="m145">
<mml:mrow>
<mml:msub>
<mml:mtext>BF</mml:mtext>
<mml:mn>01</mml:mn>
</mml:msub>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
<th align="center">
<inline-formula id="inf145">
<mml:math id="m146">
<mml:mrow>
<mml:mi>e</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>r</mml:mi>
<mml:mi>o</mml:mi>
<mml:mi>r</mml:mi>
<mml:mtext>&#x2009;</mml:mtext>
<mml:mrow>
<mml:mfenced open="[" close="]" separators="&#x7c;">
<mml:mrow>
<mml:mo>%</mml:mo>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mrow>
</mml:math>
</inline-formula>
</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">A</td>
<td align="center">B</td>
<td align="center">
<inline-formula id="inf146">
<mml:math id="m147">
<mml:mrow>
<mml:mn>1.70</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf147">
<mml:math id="m148">
<mml:mrow>
<mml:mn>4.67</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf148">
<mml:math id="m149">
<mml:mrow>
<mml:mn>2.742</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf149">
<mml:math id="m150">
<mml:mrow>
<mml:mn>0.01006</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">A</td>
<td align="center">C</td>
<td align="center">
<inline-formula id="inf150">
<mml:math id="m151">
<mml:mrow>
<mml:mn>1.70</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf151">
<mml:math id="m152">
<mml:mrow>
<mml:mn>1.31</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf152">
<mml:math id="m153">
<mml:mrow>
<mml:mn>0.770</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf153">
<mml:math id="m154">
<mml:mrow>
<mml:mn>0.00928</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">B</td>
<td align="center">C</td>
<td align="center">
<inline-formula id="inf154">
<mml:math id="m155">
<mml:mrow>
<mml:mn>1.70</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf155">
<mml:math id="m156">
<mml:mrow>
<mml:mn>4.36</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf156">
<mml:math id="m157">
<mml:mrow>
<mml:mn>2.560</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf157">
<mml:math id="m158">
<mml:mrow>
<mml:mn>0.01000</mml:mn>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<title>4 Discussion</title>
<sec id="s4-1">
<title>4.1 Comparing loudspeaker-based with virtual test environments</title>
<p>The means of all speech-in-noise measures from our study, in both loudspeaker-based and virtual environments, are consistent with the norms reported in the literature for the German HINT (<xref ref-type="fig" rid="F7">Figure 7</xref>) (<xref ref-type="bibr" rid="B42">Joiko et al., 2021</xref>; <xref ref-type="bibr" rid="B55">M&#xf6;nnich et al., 2023</xref>).</p>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>Mean speech reception thresholds (SRTs) for spatially separated and colocated noise conditions in this study in both loudspeaker-based and virtual test environments compared to the norms reported for the German HINT with male (<xref ref-type="bibr" rid="B42">Joiko et al., 2021</xref>) and female (<xref ref-type="bibr" rid="B55">M&#xf6;nnich et al., 2023</xref>) speakers.</p>
</caption>
<graphic xlink:href="frvir-05-1470382-g007.tif"/>
</fig>
<p>Similarly, the average SRM from both loudspeaker-based and virtual environments align with the findings of previous studies using the same spatial configuration and masker type employed here (stationary noise at 90&#xb0; azimuth) (<xref ref-type="bibr" rid="B1">Andersen et al., 2016</xref>; <xref ref-type="bibr" rid="B14">Beutelmann and Brand, 2006</xref>; <xref ref-type="bibr" rid="B20">Bronkhorst and Plomp, 1988</xref>; <xref ref-type="bibr" rid="B26">Cosentino et al., 2014</xref>; <xref ref-type="bibr" rid="B40">Jelfs et al., 2011</xref>; <xref ref-type="bibr" rid="B58">M&#xfc;ller, 1992</xref>; <xref ref-type="bibr" rid="B63">Ozimek et al., 2013</xref>; <xref ref-type="bibr" rid="B64">Peissig and Kollmeier, 1997</xref>; <xref ref-type="bibr" rid="B66">Platte and vom H&#xf6;vel, 1980</xref>; <xref ref-type="bibr" rid="B67">Plomp and Mimpen, 1981</xref>).</p>
<p>The validity of our baseline measurement was demonstrated by results consistent with those of previous studies (<xref ref-type="fig" rid="F8">Figure 8</xref>). Furthermore, the similarity of the results obtained in the virtual environments with those of the baseline measurement for both outcome measures (SRM and SRTs in quiet) suggests that it may be feasible to replicate speech-in-noise results from conventional loudspeaker-based setups using portable and inexpensive VR peripherals.</p>
<fig id="F8" position="float">
<label>FIGURE 8</label>
<caption>
<p>Mean spatial release from masking (SRM) in this study in both loudspeaker-based and virtual test environments compared to results from previous studies using the same spatial configuration and masker type (stationary noise at 90&#xb0; azimuth) in different setups: Loudspeaker-based (squares), headphone-based with static binaural rendering (triangles), and headphone-based with dynamic binaural rendering (circle) - [Data other than those from this study are from Figure 8.7 in <xref ref-type="bibr" rid="B28">Culling and Lavandier (2021)</xref>. The data point from <xref ref-type="bibr" rid="B40">Jelfs et al. (2011)</xref> corresponds to a model-based prediction for a stationary noise source at 90&#xb0; azimuth and target speech in front using generic head-related transfer functions (diamond)].</p>
</caption>
<graphic xlink:href="frvir-05-1470382-g008.tif"/>
</fig>
<p>It should be noted, however, that our results are limited to the experimental conditions tested: anechoic environment, with a single target speech source and a single energetic masker in spatially separated (90&#xb0;) and colocated conditions.</p>
<p>With this preliminary study, we aimed to provide initial evidence that state-of-the-art technology can be used to reproduce results from conventional laboratory and clinical settings. The positive results of our study encourage further testing in more complex, i.e., realistic, listening environments, which may lead to more ecologically valid results. In particular, evaluating the ability to reproduce results from &#x201c;real&#x201d; reverberant conditions in virtual audiovisual environments is of great interest.</p>
<p>In this context, a significant body of research has shown that with current methods and technologies, such as those used in this study, it is possible to create virtual auditory environments indistinguishable from reality, even in reverberant listening conditions. Plausible and authentic virtual acoustic presentations are possible in both anechoic (<xref ref-type="bibr" rid="B4">Arend et al., 2021a</xref>; <xref ref-type="bibr" rid="B82">Weber et al., 2024</xref>) and reverberant conditions (<xref ref-type="bibr" rid="B50">Lindau and Weinzierl, 2012</xref>; <xref ref-type="bibr" rid="B18">Brinkmann et al., 2017</xref>; <xref ref-type="bibr" rid="B17">Brinkmann et al., 2019</xref>; <xref ref-type="bibr" rid="B5">Arend et al., 2021b</xref>; <xref ref-type="bibr" rid="B3">Arend et al., 2024</xref>). However, as outlined in the introduction, exploiting multisensory integration is crucial to achieving this level of realism (<xref ref-type="bibr" rid="B44">Keidser et al., 2020</xref>). Many relevant technical aspects have to be considered, such as the use of real-time motion compensation, the use of matching visuals or matching the auditory environment perfectly to the visual real world, including appropriate descriptions of the source and receiver characteristics, e.g., source directivity and HRTFs, and correct headphone compensation filters, among others.</p>
<p>With this contribution, we include the Unity project (including all the virtual environments described here), which facilitates the reproducibility and extension of our experimental setup. Multiple sources can be easily added, and all stimuli (including audio and text) can be replaced using the regular file system. This means our setup can be easily transferred to different stimuli in different languages. The application also allows easy customization of various parameters, such as the number of noise conditions, lists, sentences, practice rounds, and adaptive step sizes. There is no need to modify the source code, as all customization can be done using the Unity Inspector interface.</p>
</sec>
<sec id="s4-2">
<title>4.2 On the use of headphone-based dynamic binaural rendering for speech-in-noise testing</title>
<p>To our knowledge, this study is the first to investigate speech-in-noise abilities using headphone-based dynamic binaural rendering with non-individual HRTFs. The role of spontaneous head movements in increasing the target signal level when speech intelligibility decreases, also known as the head orientation benefit (HOB), has been extensively studied. Kock&#x2019;s work in 1950 was the first to demonstrate this phenomenon (<xref ref-type="bibr" rid="B48">Kock, 1950</xref>). Later, <xref ref-type="bibr" rid="B33">Grange and Culling (2016)</xref> investigated the benefits of head orientation away from the speech source in NH listeners. They analyzed spontaneous head orientations when listeners were presented with long speech clips of gradually decreasing SNR in an acoustically treated room. The speech was presented from a loudspeaker initially facing the listener, and the competing noise was presented from one of four other locations. In an undirected paradigm, they observed that listeners instinctively turned their heads away from the speech (between &#xb1;10&#xb0; and &#xb1;65&#xb0;) in 56% of trials in response to increased intelligibility difficulties. They then observed that when subjects were explicitly instructed to perform head movements, all turned away from speech at lower SNRs and immediately reached head orientations associated with lower SRTs.</p>
<p>Similarly, <xref ref-type="bibr" rid="B16">Brimijoin et al. (2012)</xref> investigated head orientation strategies in a speech comprehension task in the presence of spatially separated competing noise. They found a clear tendency to orient approximately 60&#xb0; away from the target, regardless of the position of the distractor signal, in listeners with large (&#x3e;16&#xa0;dB) hearing threshold differences between their left and right ears.</p>
<p>We did not log the head-tracking data in this study because spontaneous head movements and the resulting HOB have long been studied and demonstrated. However, as expected, we observed that participants&#x2019; behavior regarding head orientations was consistent with the abovementioned findings.</p>
<p>The videos in the <xref ref-type="sec" rid="s12">Supplementary Material</xref>, recorded from the listener&#x2019;s perspective, exemplify the spontaneous use of head movements and illustrate the (very pronounced) level increase in one ear when head movements are exploited.</p>
</sec>
<sec id="s4-3">
<title>4.3 On the use of visual feedback</title>
<p>In addition to supporting auditory spatial perception by aiding externalization and distance estimation (<xref ref-type="bibr" rid="B12">Best et al., 2020</xref>), the use of visual cues in speech-in-noise testing paradigms may facilitate a better understanding of the mechanisms underlying auditory perception from a multisensory perspective and potentially lead to significant advances in hearing research. In clinical settings, using appropriate visual cues can improve current assessment procedures&#x2019; ecological validity and accuracy. For example, lip-reading has been shown to support speech intelligibility in noisy environments, and it is an aspect that is still overlooked in current speech-in-noise assessment methods (<xref ref-type="bibr" rid="B35">Helfer and Freyman, 2005</xref>; <xref ref-type="bibr" rid="B85">Williams et al., 2023</xref>; <xref ref-type="bibr" rid="B87">Yuan et al., 2021</xref>). In this study, we used a simple visual cue at the target speaker location without facial expressions or lip movements, and we found evidence for the absence of an effect of the visual feedback used on SRM.</p>
<p>Nevertheless, this may be different in more complex listening scenarios, for example, in cases where there is some uncertainty about the target speaker&#x2019;s location or in multi-speaker or cocktail party scenarios. There may be interactions between vision and auditory perception that require further investigation, and VR can play an important role in supporting auditory research in this regard. Further work should focus on understanding these potential interactions and their impact on speech intelligibility and listening effort.</p>
</sec>
<sec id="s4-4">
<title>4.4 On the use of unsupervised procedures</title>
<p>While currently available speech-in-noise tests are reliable, they require manual scoring by a clinician, which can be inconvenient in busy clinical settings. As a result, these tests are not widely used in routine hearing evaluations. The literature highlights the need for automated tests, which could allow testing while the patient is waiting in the clinic or remotely (<xref ref-type="bibr" rid="B38">Jakien et al., 2017</xref>). The use of closed-set tasks may facilitate unsupervised measurements.</p>
<p>To assess whether using a closed-set instead of the standard open-set procedure from the HINT would affect speech-in-noise measures, we incorporated the word selection feedback system into the VR3 test. We found evidence for the absence of an effect of group and environment on the SRM measure, suggesting that it is feasible to conduct speech-in-noise testing in VR, even with the unsupervised procedure introduced here. However, the evidence supporting our unsupervised procedure was too weak to be conclusive for SRTs in quiet.</p>
<p>Closed-set procedures may result in reduced (better) SRTs, mainly when the response set contains few phonetically dissimilar alternatives (<xref ref-type="bibr" rid="B54">Miller, Heise, and Lichten, 1951</xref>; <xref ref-type="bibr" rid="B81">Warzybok et al., 2015</xref>; <xref ref-type="bibr" rid="B21">Buss, Leibold, and Hall, 2016</xref>). Future research in remote or unsupervised speech-in-noise testing could explore alternative ways to design automated response systems. For example, <xref ref-type="bibr" rid="B51">Litovsky (2005)</xref> suggested adjusting task difficulty based on the listener&#x2019;s age. Their results indicated that 4AFC tasks were easier for adults than children, resulting in lower SRTs. <xref ref-type="bibr" rid="B41">Johnstone and Litovsky (2006)</xref> subsequently found significant differences in adult SRTs using 4AFC and 25AFC tasks as response methods, but only when speech was used as a masker, not when modulated noise was used, suggesting that appropriate response methods could be both population and stimuli-dependent.</p>
<p>Another attractive alternative may be to incorporate automatic speech recognition (ASR) into the virtual tests, preserving the open-set nature of the task while allowing for unsupervised testing. <xref ref-type="bibr" rid="B62">Ooster et al. (2023)</xref> proposed using an ASR for automatic response recording based on a time-delay neural network. They estimate an SRT deviation below 1.38&#xa0;dB for 95% of users with this method, suggesting that robust unsupervised testing may be possible with similar accuracy as with a human supervisor, even in noisy conditions and with altered or disordered speech from elderly severely hearing-impaired listeners and cochlear implant users.</p>
</sec>
<sec id="s4-5">
<title>4.5 Other remarks regarding virtualization</title>
<p>While VR offers potentially groundbreaking opportunities, many issues must be carefully considered before virtual testing can be used for individualized screening in clinical settings. Some of them are:</p>
<p>Our preliminary results represent only a small sample of NH adults. These results cannot be generalized to other populations. Follow-up studies should include large standardized samples, including patients with hearing loss, auditory processing disorders, and NH controls of different age groups in both conventional loudspeaker-based and VR conditions.</p>
<p>Although VR and its potential to serve children have been extensively researched, particularly in educational and medical settings, such as a tool for pain distraction, assessment of Attention Deficit/Hyperactivity Disorder (ADHD) and Autism Spectrum Disorder (ASD), and psychotherapy, among others, many questions remain about the potential impact of VR on children&#x2019;s development. It is still unclear whether we could use VR to assess speech-in-noise abilities in children. Further research should focus on creating controlled and safe environments that allow us to address these questions. This includes aspects such as appropriate exposure times, appropriate complexity of visual feedback, and considerations such as appropriate HRTF sets from a technical virtual acoustics perspective.</p>
<p>VR can cause motion sickness. In our study, participants did not report any adverse effects while using the HMD. However, this is a relevant aspect when using more complex visual feedback, as this may increase the likelihood of experiencing it.</p>
<p>Managing cognitive load is another major challenge in the design and use of VR. Excessive cognitive load can negatively affect the user experience, cause fatigue, and interfere with task performance. Therefore, future research should focus on understanding the relationship between increased realism in virtual environments, its associated cognitive load, and its potential impact on measures of auditory processing.</p>
<p>Recognizing and addressing the potential drawbacks of VR through research, innovation, and responsible use can help maximize its benefits while minimizing its risks. By promoting ethical and inclusive practices and fostering a balanced approach to VR adoption, we can harness this transformative technology in hearing research and audiological healthcare.</p>
</sec>
</sec>
<sec sec-type="conclusion" id="s5">
<title>5 Conclusion</title>
<p>Our results suggest that conducting the HINT, a widely accepted and accurate speech-in-noise test, is feasible in state-of-the-art VR environments, even without experimenter supervision. We found no statistically significant differences between the SRM measures obtained in any of the VR environments tested and the loudspeaker-based setup used as a baseline. However, for the SRTs in quiet, the evidence was too weak to be conclusive. Nevertheless, as described in the Introduction, speech-in-noise measures are considered to be more representative of a person&#x2019;s functional hearing ability in real-life listening scenarios than SRTs in quiet, and current literature and clinical guidelines encourage the use of speech-in-noise testing rather than measuring SRTs in quiet when a more comprehensive understanding of a patient&#x2019;s everyday hearing ability is desired.</p>
<p>Although our study was limited to an anechoic environment with a single target speech source and a single energetic masker (following the standard setup for the HINT), our findings pave the way for further research. Future studies should evaluate how these results generalize to more complex listening scenarios, such as those involving multiple speakers, greater visual complexity, and more diverse populations. Specifically, future work should investigate how these results apply to hard-of-hearing individuals, including patients with hearing loss and auditory processing disorders in different age groups.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="sec" rid="s12">Supplementary Material</xref>, further inquiries can be directed to the corresponding author. To promote and foster open-source and reproducible research, we have made the Unity project (including all the virtual environments described here) available at <ext-link ext-link-type="uri" xlink:href="https://github.com/AudioGroupCologne/HINT-VR">https://github.com/AudioGroupCologne/HINT-VR</ext-link> under a Creative Commons license CC BY-NC-SA 4.0. Supplemental material for this article is available online at <ext-link ext-link-type="uri" xlink:href="https://zenodo.org/records/13952337">https://zenodo.org/records/13952337</ext-link>.</p>
</sec>
<sec sec-type="ethics-statement" id="s7">
<title>Ethics statement</title>
<p>The studies involving humans were approved by Institute of Computer and Communication Technology at the TH K&#xf6;ln. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec sec-type="author-contributions" id="s8">
<title>Author contributions</title>
<p>MR: Conceptualization, Formal Analysis, Investigation, Methodology, Visualization, Writing&#x2013;original draft, Writing&#x2013;review and editing. AM: Data curation, Software, Writing&#x2013;review and editing. JA: Funding acquisition, Supervision, Validation, Writing&#x2013;review and editing. HH: Data curation, Writing&#x2013;review and editing. TR: Resources, Validation, Writing&#x2013;review and editing. CP: Funding acquisition, Project administration, Supervision, Validation, Writing&#x2013;review and editing.</p>
</sec>
<sec sec-type="funding-information" id="s9">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research, authorship, and/or publication of this article. This work was sponsored by the German Federal Ministry of Education and Research BMBF (13FH666IA6-VIWER-S) and partly by the German Research Foundation (DFG WE 4057/21-1).</p>
</sec>
<ack>
<p>The authors express their gratitude to all the participants who took part in the study. They would also like to thank Tilman Brach and Johan Dasbach for their assistance in collecting the data. The authors would like to thank the handling editor and the reviewers for their constructive feedback, which significantly improved the quality of the manuscript.</p>
</ack>
<sec sec-type="COI-statement" id="s10">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s11">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s12">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/frvir.2024.1470382/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/frvir.2024.1470382/full&#x23;supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="DataSheet1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Andersen</surname>
<given-names>A. H.</given-names>
</name>
<name>
<surname>de Haan</surname>
<given-names>J. M.</given-names>
</name>
<name>
<surname>Tan</surname>
<given-names>Z. H.</given-names>
</name>
<name>
<surname>Jensen</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Predicting the intelligibility of noisy and nonlinearly processed binaural speech</article-title>. <source>IEEE/ACM Trans. Audio, Speech, Lang. Process.</source> <volume>24</volume> (<issue>11</issue>), <fpage>1908</fpage>&#x2013;<lpage>1920</lpage>. <pub-id pub-id-type="doi">10.1109/TASLP.2016.2588002</pub-id>
</citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Arbogast</surname>
<given-names>T. L.</given-names>
</name>
<name>
<surname>Mason</surname>
<given-names>C. R.</given-names>
</name>
<name>
<surname>Kidd</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2005</year>). <article-title>The effect of spatial separation on informational masking of speech in normal-hearing and hearing-impaired listeners</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>117</volume> (<issue>4</issue>), <fpage>2169</fpage>&#x2013;<lpage>2180</lpage>. <pub-id pub-id-type="doi">10.1121/1.1861598</pub-id>
</citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Arend</surname>
<given-names>J. M.</given-names>
</name>
<name>
<surname>Brinkmann</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Ram&#xed;rez</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Scheer</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Weinzierl</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2024</year>). &#x201c;<article-title>Auditory distance perception in a real and virtual walk-through environment</article-title>,&#x201d; in <source>Proceedings of the 50th DAGA</source>. <comment>Hannover</comment>.</citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Arend</surname>
<given-names>J. M.</given-names>
</name>
<name>
<surname>Ram&#xed;rez</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Liesefeld</surname>
<given-names>H. R.</given-names>
</name>
<name>
<surname>P&#xf6;rschmann</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2021a</year>). <article-title>Do near-field cues enhance the plausibility of non-individual binaural rendering in a dynamic multimodal virtual acoustic scene?</article-title> <source>Acta Acust.</source> <volume>5</volume> (<issue>3</issue>), <fpage>55</fpage>. <pub-id pub-id-type="doi">10.1051/aacus/2021048</pub-id>
</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Arend</surname>
<given-names>J. M.</given-names>
</name>
<name>
<surname>Schissler</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Klein</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Robinson</surname>
<given-names>P. W.</given-names>
</name>
</person-group> (<year>2021b</year>). <article-title>Six-Degrees-of-Freedom parametric spatial audio based on one monaural room impulse response</article-title>. <source>J. Audio Eng. Soc.</source> <volume>69</volume> (<issue>7/8</issue>), <fpage>557</fpage>&#x2013;<lpage>575</lpage>. <pub-id pub-id-type="doi">10.17743/jaes.2021.0009</pub-id>
</citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Beck</surname>
<given-names>D. L.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Speech-in-Noise testing: pivotal and rare</article-title>. <source>Hear. J.</source> <volume>76</volume> (<issue>12</issue>), <fpage>28</fpage>&#x2013;<lpage>32</lpage>. <pub-id pub-id-type="doi">10.1097/01.HJ.0000997248.20295.53</pub-id>
</citation>
</ref>
<ref id="B7">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Beechey</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Ecological validity, external validity, and mundane realism in hearing science</article-title>. <source>Ear Hear.</source> <volume>43</volume> (<issue>5</issue>), <fpage>1395</fpage>&#x2013;<lpage>1401</lpage>. <pub-id pub-id-type="doi">10.1097/AUD.0000000000001202</pub-id>
</citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Begault</surname>
<given-names>D. R.</given-names>
</name>
<name>
<surname>Wenzel</surname>
<given-names>E. M.</given-names>
</name>
<name>
<surname>Anderson</surname>
<given-names>M. R.</given-names>
</name>
</person-group> (<year>2001</year>). <article-title>Direct comparison of the impact of head tracking, reverberation, and individualized head-related transfer functions on the spatial perception of a virtual speech source</article-title>. <source>AES J. Audio Eng. Soc.</source> <volume>49</volume> (<issue>10</issue>), <fpage>904</fpage>&#x2013;<lpage>916</lpage>.</citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bellis</surname>
<given-names>T. J.</given-names>
</name>
<name>
<surname>Bellis</surname>
<given-names>J. D.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Central auditory processing disorders in children and adults</article-title>. <source>Handb. Clin. Neurology</source> <volume>129</volume> (<issue>1954</issue>), <fpage>537</fpage>&#x2013;<lpage>556</lpage>. <pub-id pub-id-type="doi">10.1016/B978-0-444-62630-1.00030-5</pub-id>
</citation>
</ref>
<ref id="B10">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bench</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Kowal</surname>
<given-names>&#xc5;.</given-names>
</name>
<name>
<surname>Bamford</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>1979</year>). <article-title>The BKB (Bamford-Kowal-Bench) sentence lists for partially-hearing children</article-title>. <source>Br. J. Audiology</source> <volume>13</volume> (<issue>3</issue>), <fpage>108</fpage>&#x2013;<lpage>112</lpage>. <pub-id pub-id-type="doi">10.3109/03005367909078884</pub-id>
</citation>
</ref>
<ref id="B11">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Bernsch&#xfc;tz</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2013</year>). &#x201c;<article-title>A spherical far field HRIR/HRTF compilation of the Neumann KU 100</article-title>,&#x201d; in <source>Proceedings of the 39th DAGA</source>, <fpage>592</fpage>&#x2013;<lpage>595</lpage>.</citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Best</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Baumgartner</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Lavandier</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Majdak</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Kop&#x10d;o</surname>
<given-names>N.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Sound externalization: a review of recent research</article-title>. <source>Trends Hear.</source> <volume>24</volume>, <fpage>1</fpage>&#x2013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1177/2331216520948390</pub-id>
</citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Best</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Marrone</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Mason</surname>
<given-names>C. R.</given-names>
</name>
<name>
<surname>Kidd</surname>
<given-names>G.</given-names>
<suffix>Jr.</suffix>
</name>
</person-group> (<year>2012</year>). <article-title>The influence of non-spatial factors on measures of spatial release from masking</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>131</volume> (<issue>4</issue>), <fpage>3103</fpage>&#x2013;<lpage>3110</lpage>. <pub-id pub-id-type="doi">10.1121/1.3693656</pub-id>
</citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Beutelmann</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Brand</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>Prediction of speech intelligibility in spatial noise and reverberation for normal-hearing and hearing-impaired listeners</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>120</volume> (<issue>1</issue>), <fpage>331</fpage>&#x2013;<lpage>342</lpage>. <pub-id pub-id-type="doi">10.1121/1.2202888</pub-id>
</citation>
</ref>
<ref id="B15">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Brimijoin</surname>
<given-names>W. O.</given-names>
</name>
<name>
<surname>Boyd</surname>
<given-names>A. W.</given-names>
</name>
<name>
<surname>Akeroyd</surname>
<given-names>M. A.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>The contribution of head movement to the externalization and internalization of sounds</article-title>. <source>PLoS ONE</source> <volume>8</volume> (<issue>12</issue>), <fpage>e83068</fpage>&#x2013;<lpage>e83068</lpage>. <pub-id pub-id-type="doi">10.1371/journal.pone.0083068</pub-id>
</citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Brimijoin</surname>
<given-names>W. O.</given-names>
</name>
<name>
<surname>McShefferty</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Akeroyd</surname>
<given-names>M. A.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Undirected head movements of listeners with asymmetrical hearing impairment during a speech-in-noise task</article-title>. <source>Hear. Res.</source> <volume>283</volume> (<issue>1&#x2013;2</issue>), <fpage>162</fpage>&#x2013;<lpage>168</lpage>. <pub-id pub-id-type="doi">10.1016/j.heares.2011.10.009</pub-id>
</citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Brinkmann</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Asp&#xf6;ck</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Ackermann</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Lepa</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Vorl&#xe4;nder</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Weinzierl</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>A round robin on room acoustical simulation and auralization</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>145</volume> (<issue>4</issue>), <fpage>2746</fpage>&#x2013;<lpage>2760</lpage>. <pub-id pub-id-type="doi">10.1121/1.5096178</pub-id>
</citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Brinkmann</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Lindau</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Weinzierl</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>On the authenticity of individual dynamic binaural synthesis</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>142</volume> (<issue>4</issue>), <fpage>1784</fpage>&#x2013;<lpage>1795</lpage>. <pub-id pub-id-type="doi">10.1121/1.5005606</pub-id>
</citation>
</ref>
<ref id="B19">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bronkhorst</surname>
<given-names>A. W.</given-names>
</name>
</person-group> (<year>2000</year>). <article-title>The cocktail party phenomenon: a review of research on speech intelligibility in multiple-talker conditions</article-title>. <source>Acta Acustica united Acustica</source> <volume>86</volume> (<issue>1</issue>), <fpage>117</fpage>&#x2013;<lpage>128</lpage>.</citation>
</ref>
<ref id="B20">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bronkhorst</surname>
<given-names>A. W.</given-names>
</name>
<name>
<surname>Plomp</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>1988</year>). <article-title>The effect of head-induced interaural time and level differences on speech intelligibility in noise</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>83</volume> (<issue>4</issue>), <fpage>1508</fpage>&#x2013;<lpage>1516</lpage>. <pub-id pub-id-type="doi">10.1121/1.395906</pub-id>
</citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Buss</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Leibold</surname>
<given-names>L. J.</given-names>
</name>
<name>
<surname>Hall</surname>
<given-names>J. W.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Effect of response context and masker type on word recognition in school-age children and adults</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>140</volume> (<issue>2</issue>), <fpage>968</fpage>&#x2013;<lpage>977</lpage>. <pub-id pub-id-type="doi">10.1121/1.4960587</pub-id>
</citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Busse</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Roberts</surname>
<given-names>K. C.</given-names>
</name>
<name>
<surname>Crist</surname>
<given-names>R. E.</given-names>
</name>
<name>
<surname>Weissman</surname>
<given-names>D. H.</given-names>
</name>
<name>
<surname>Woldorff</surname>
<given-names>M. G.</given-names>
</name>
</person-group> (<year>2005</year>). <article-title>The spread of attention across modalities and space in a multisensory object</article-title>. <source>Proc. Natl. Acad. Sci. U. S. A.</source> <volume>102</volume> (<issue>51</issue>), <fpage>18751</fpage>&#x2013;<lpage>18756</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.0507704102</pub-id>
</citation>
</ref>
<ref id="B23">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cameron</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Dillon</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2007</year>). &#x2018;<article-title>Development of the listening in spatialized noise-sentences test (LISN-S)</article-title>&#x2019;, <source>Ear Hear.</source>, <volume>28</volume>(<issue>2</issue>), pp. <fpage>196</fpage>&#x2013;<lpage>211</lpage>. <pub-id pub-id-type="doi">10.1097/AUD.0b013e318031267f</pub-id>
</citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Carhart</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Tillman</surname>
<given-names>T. W.</given-names>
</name>
</person-group> (<year>1970</year>). <article-title>Interaction of competing speech signals with hearing losses</article-title>. <source>Archives Otolaryngology</source> <volume>91</volume> (<issue>3</issue>), <fpage>273</fpage>&#x2013;<lpage>279</lpage>. <pub-id pub-id-type="doi">10.1001/archotol.1970.00770040379010</pub-id>
</citation>
</ref>
<ref id="B25">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Clark</surname>
<given-names>J. G.</given-names>
</name>
<name>
<surname>Huff</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Earl</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Clinical practice report card&#x2013;Are we meeting best practice standards for adult hearing rehabilitation?</article-title> <source>Audiol. Today</source> <volume>29</volume> (<issue>6</issue>), <fpage>15</fpage>&#x2013;<lpage>25</lpage>.</citation>
</ref>
<ref id="B26">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cosentino</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Marquardt</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>McAlpine</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Culling</surname>
<given-names>J. F.</given-names>
</name>
<name>
<surname>Falk</surname>
<given-names>T. H.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>A model that predicts the binaural advantage to speech intelligibility from the mixed target and interferer signals</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>135</volume> (<issue>2</issue>), <fpage>796</fpage>&#x2013;<lpage>807</lpage>. <pub-id pub-id-type="doi">10.1121/1.4861239</pub-id>
</citation>
</ref>
<ref id="B27">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cuevas-Rodr&#xed;guez</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Picinali</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Gonz&#xe1;lez-Toledo</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Garre</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>de la Rubia-Cuestas</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Molina-Tanco</surname>
<given-names>L.</given-names>
</name>
<etal/>
</person-group> (<year>2019</year>). <article-title>3D Tune-In Toolkit: an open-source library for real-time binaural spatialisation</article-title>. <source>PLoS ONE</source> <volume>14</volume> (<issue>3</issue>), <fpage>e0211899</fpage>. <pub-id pub-id-type="doi">10.1371/JOURNAL.PONE.0211899</pub-id>
</citation>
</ref>
<ref id="B28">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Culling</surname>
<given-names>J. F.</given-names>
</name>
<name>
<surname>Lavandier</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2021</year>). &#x201c;<article-title>Binaural unmasking and spatial release from masking</article-title>,&#x201d; in <source>Binaural hearing. Springer handbook of auditory research</source>. Editor <person-group person-group-type="editor">
<name>
<surname>Litovsky</surname>
<given-names>R. Y.</given-names>
</name>
</person-group>, (<publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>209</fpage>&#x2013;<lpage>241</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-57100-9_8</pub-id>
</citation>
</ref>
<ref id="B29">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dirks</surname>
<given-names>D. D.</given-names>
</name>
<name>
<surname>Wilson</surname>
<given-names>R. H.</given-names>
</name>
</person-group> (<year>1969</year>). <article-title>The effect of spatially separated sound sources on speech intelligibility</article-title>. <source>J. Speech Hear. Res.</source> <volume>12</volume> (<issue>1</issue>), <fpage>5</fpage>&#x2013;<lpage>38</lpage>. <pub-id pub-id-type="doi">10.1044/jshr.1201.05</pub-id>
</citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Erbes</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Geier</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Wierstorf</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Spors</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2017</year>). &#x201c;<article-title>Free database of low-frequency corrected head-related transfer functions and headphone compensation filters</article-title>,&#x201d; in <source>Proceedings of 142nd audio engineering society convention</source> <volume>325</volume>, <fpage>1</fpage>&#x2013;<lpage>5</lpage>.</citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Garadat</surname>
<given-names>S. N.</given-names>
</name>
<name>
<surname>Litovsky</surname>
<given-names>R. Y.</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Zeng</surname>
<given-names>F. G.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Role of binaural hearing in speech intelligibility and spatial release from masking using vocoded speech</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>126</volume> (<issue>5</issue>), <fpage>2522</fpage>&#x2013;<lpage>2535</lpage>. <pub-id pub-id-type="doi">10.1121/1.3238242</pub-id>
</citation>
</ref>
<ref id="B32">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gaveau</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Coudert</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Salemme</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Koun</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Desoche</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Truy</surname>
<given-names>E.</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Benefits of active listening during 3D sound localization</article-title>. <source>Exp. Brain Res.</source> <volume>240</volume> (<issue>11</issue>), <fpage>2817</fpage>&#x2013;<lpage>2833</lpage>. <pub-id pub-id-type="doi">10.1007/s00221-022-06456-x</pub-id>
</citation>
</ref>
<ref id="B33">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Grange</surname>
<given-names>J. A.</given-names>
</name>
<name>
<surname>Culling</surname>
<given-names>J. F.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>The benefit of head orientation to speech intelligibility in noise</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>139</volume> (<issue>2</issue>), <fpage>703</fpage>&#x2013;<lpage>712</lpage>. <pub-id pub-id-type="doi">10.1121/1.4941655</pub-id>
</citation>
</ref>
<ref id="B34">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hawley</surname>
<given-names>M. L.</given-names>
</name>
<name>
<surname>Litovsky</surname>
<given-names>R. Y.</given-names>
</name>
<name>
<surname>Culling</surname>
<given-names>J. F.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>The benefit of binaural hearing in a cocktail party: effect of location and type of interferer</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>115</volume> (<issue>2</issue>), <fpage>833</fpage>&#x2013;<lpage>843</lpage>. <pub-id pub-id-type="doi">10.1121/1.1639908</pub-id>
</citation>
</ref>
<ref id="B35">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Helfer</surname>
<given-names>K. S.</given-names>
</name>
<name>
<surname>Freyman</surname>
<given-names>R. L.</given-names>
</name>
</person-group> (<year>2005</year>). <article-title>The role of visual speech cues in reducing energetic and informational masking</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>117</volume> (<issue>2</issue>), <fpage>842</fpage>&#x2013;<lpage>849</lpage>. <pub-id pub-id-type="doi">10.1121/1.1836832</pub-id>
</citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hess</surname>
<given-names>C. L.</given-names>
</name>
<name>
<surname>Misurelli</surname>
<given-names>S. M.</given-names>
</name>
<name>
<surname>Litovsky</surname>
<given-names>R. Y.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Spatial release from masking in 2-year-olds with normal hearing and with bilateral cochlear implants</article-title>. <source>Trends Hear.</source> <volume>22</volume>, <fpage>2331216518775567</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1177/2331216518775567</pub-id>
</citation>
</ref>
<ref id="B37">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Isaiah</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Vongpaisal</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>King</surname>
<given-names>A. J.</given-names>
</name>
<name>
<surname>Hartley</surname>
<given-names>D. E. H.</given-names>
</name>
</person-group> (<year>2014</year>). <article-title>Multisensory training improves auditory spatial processing following bilateral cochlear implantation</article-title>. <source>J. Neurosci.</source> <volume>34</volume> (<issue>33</issue>), <fpage>11119</fpage>&#x2013;<lpage>11130</lpage>. <pub-id pub-id-type="doi">10.1523/JNEUROSCI.4767-13.2014</pub-id>
</citation>
</ref>
<ref id="B38">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jakien</surname>
<given-names>K. M.</given-names>
</name>
<name>
<surname>Kampel</surname>
<given-names>S. D.</given-names>
</name>
<name>
<surname>Stansell</surname>
<given-names>M. M.</given-names>
</name>
<name>
<surname>Gallun</surname>
<given-names>F. J.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Validating a rapid, automated test of spatial release from masking</article-title>. <source>Am. J. Audiology</source> <volume>26</volume> (<issue>4</issue>), <fpage>507</fpage>&#x2013;<lpage>518</lpage>. <pub-id pub-id-type="doi">10.1044/2017_AJA-17-0013</pub-id>
</citation>
</ref>
<ref id="B39">
<citation citation-type="journal">
<collab>Jamovi [Computer Software]</collab> (<year>2022</year>). <article-title>The Jamovi project</article-title>.</citation>
</ref>
<ref id="B40">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jelfs</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Culling</surname>
<given-names>J. F.</given-names>
</name>
<name>
<surname>Lavandier</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Revision and validation of a binaural model for speech intelligibility in noise</article-title>. <source>Hear. Res.</source> <volume>275</volume> (<issue>1&#x2013;2</issue>), <fpage>96</fpage>&#x2013;<lpage>104</lpage>. <pub-id pub-id-type="doi">10.1016/j.heares.2010.12.005</pub-id>
</citation>
</ref>
<ref id="B41">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Johnstone</surname>
<given-names>P. M.</given-names>
</name>
<name>
<surname>Litovsky</surname>
<given-names>R. Y.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>Effect of masker type and age on speech intelligibility and spatial release from masking in children and adults</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>120</volume> (<issue>4</issue>), <fpage>2177</fpage>&#x2013;<lpage>2189</lpage>. <pub-id pub-id-type="doi">10.1121/1.2225416</pub-id>
</citation>
</ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Joiko</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Bohnert</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Strieth</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Soli</surname>
<given-names>S. D.</given-names>
</name>
<name>
<surname>Rader</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>The German hearing in noise test</article-title>. <source>Int. J. Audiology</source> <volume>60</volume> (<issue>11</issue>), <fpage>927</fpage>&#x2013;<lpage>933</lpage>. <pub-id pub-id-type="doi">10.1080/14992027.2020.1837969</pub-id>
</citation>
</ref>
<ref id="B43">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Keidser</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2016</year>). <article-title>Introduction to special issue: towards ecologically valid protocols for the assessment of hearing and hearing devices</article-title>. <source>J. Am. Acad. Audiology</source> <volume>27</volume> (<issue>7</issue>), <fpage>502</fpage>&#x2013;<lpage>503</lpage>. <pub-id pub-id-type="doi">10.3766/jaaa.27.7.1</pub-id>
</citation>
</ref>
<ref id="B44">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Keidser</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Naylor</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Brungart</surname>
<given-names>D. S.</given-names>
</name>
<name>
<surname>Caduff</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Campos</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Carlile</surname>
<given-names>S.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>The quest for ecological validity in hearing science: what it is, why it matters, and how to advance it</article-title>. <source>Ear &#x26; Hear.</source> <volume>41</volume> (<issue>1</issue>), <fpage>5</fpage>&#x2013;<lpage>19</lpage>. <pub-id pub-id-type="doi">10.1097/AUD.0000000000000944</pub-id>
</citation>
</ref>
<ref id="B45">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Keysers</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Gazzola</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Wagenmakers</surname>
<given-names>E. J.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Using Bayes factor hypothesis testing in neuroscience to establish evidence of absence</article-title>. <source>Nat. Neurosci.</source> <volume>23</volume> (<issue>7</issue>), <fpage>788</fpage>&#x2013;<lpage>799</lpage>. <pub-id pub-id-type="doi">10.1038/s41593-020-0660-4</pub-id>
</citation>
</ref>
<ref id="B46">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kidd</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Mason</surname>
<given-names>C. R.</given-names>
</name>
<name>
<surname>Brughera</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2005</year>). <article-title>The role of reverberation in release from masking due to spatial separation of sources for speech identification</article-title>. <source>Acta Acustica united with Acustica</source> <volume>91</volume>, <fpage>526</fpage>&#x2013;<lpage>536</lpage>. <pub-id pub-id-type="doi">10.1121/1.4809166</pub-id>
</citation>
</ref>
<ref id="B47">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Killion</surname>
<given-names>M. C.</given-names>
</name>
<name>
<surname>Niquette</surname>
<given-names>P. A.</given-names>
</name>
<name>
<surname>Gudmundsen</surname>
<given-names>G. I.</given-names>
</name>
<name>
<surname>Revit</surname>
<given-names>L. J.</given-names>
</name>
<name>
<surname>Banerjee</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>Development of a quick speech-in-noise test for measuring signal-to-noise ratio loss in normal-hearing and hearing-impaired listeners</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>116</volume> (<issue>4</issue>), <fpage>2395</fpage>&#x2013;<lpage>2405</lpage>. <pub-id pub-id-type="doi">10.1121/1.1784440</pub-id>
</citation>
</ref>
<ref id="B48">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kock</surname>
<given-names>W. E.</given-names>
</name>
</person-group> (<year>1950</year>). <article-title>Binaural localization and masking</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>22</volume> (<issue>6</issue>), <fpage>801</fpage>&#x2013;<lpage>804</lpage>. <pub-id pub-id-type="doi">10.1121/1.1906692</pub-id>
</citation>
</ref>
<ref id="B49">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lindau</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Brinkmann</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Perceptual evaluation of head-phone compensation in binaural synthesis based on non-individual recordings</article-title>. <source>J. Audio Eng. Soc.</source> <volume>60</volume> (<issue>1/2</issue>), <fpage>54</fpage>&#x2013;<lpage>62</lpage>.</citation>
</ref>
<ref id="B50">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lindau</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Weinzierl</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Assessing the plausibility of virtual acoustic environments</article-title>. <source>Acta Acustica united Acustica</source> <volume>98</volume> (<issue>5</issue>), <fpage>804</fpage>&#x2013;<lpage>810</lpage>. <pub-id pub-id-type="doi">10.3813/AAA.918562</pub-id>
</citation>
</ref>
<ref id="B51">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Litovsky</surname>
<given-names>R. Y.</given-names>
</name>
</person-group> (<year>2005</year>). <article-title>Speech intelligibility and spatial release from masking in young children</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>117</volume> (<issue>5</issue>), <fpage>3091</fpage>&#x2013;<lpage>3099</lpage>. <pub-id pub-id-type="doi">10.1121/1.1873913</pub-id>
</citation>
</ref>
<ref id="B52">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Majdak</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Zotter</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Brinkmann</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>De Muynke</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Mihocic</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Noisternig</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Spatially oriented format for acoustics 2.1: introduction and recent advances</article-title>. <source>AES J. Audio Eng. Soc.</source> <volume>70</volume> (<issue>7&#x2013;8</issue>), <fpage>565</fpage>&#x2013;<lpage>584</lpage>. <pub-id pub-id-type="doi">10.17743/jaes.2022.0026</pub-id>
</citation>
</ref>
<ref id="B53">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Middlebrooks</surname>
<given-names>J. C.</given-names>
</name>
<name>
<surname>Simon</surname>
<given-names>J. Z.</given-names>
</name>
<name>
<surname>Popper</surname>
<given-names>A. N.</given-names>
</name>
<name>
<surname>Fay</surname>
<given-names>R. F.</given-names>
</name>
</person-group> (<year>2017</year>). <source>Springer handbook of auditory research: the auditory system at the cocktail party</source>. Editor <person-group person-group-type="editor">
<name>
<surname>Press</surname>
<given-names>A.</given-names>
</name>
</person-group> (<publisher-name>Springer</publisher-name>), <volume>60</volume>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/978-3-319-51662-2">https://doi.org/10.1007/978-3-319-51662-2</ext-link>
</comment>
</citation>
</ref>
<ref id="B54">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Miller</surname>
<given-names>G. A.</given-names>
</name>
<name>
<surname>Heise</surname>
<given-names>G. A.</given-names>
</name>
<name>
<surname>Lichten</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>1951</year>). <article-title>The intelligibility of speech as a function of the context of the test materials</article-title>. <source>J. Exp. Psychol.</source> <volume>41</volume> (<issue>5</issue>), <fpage>329</fpage>&#x2013;<lpage>335</lpage>. <pub-id pub-id-type="doi">10.1037/h0062491</pub-id>
</citation>
</ref>
<ref id="B55">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>M&#xf6;nnich</surname>
<given-names>A.-L.</given-names>
</name>
<name>
<surname>Strieth</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Bohnert</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ernst</surname>
<given-names>B. P.</given-names>
</name>
<name>
<surname>Rader</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>The German hearing in noise test with a female talker: development and comparison with German male speech test</article-title>. <source>Eur. Archives Oto-Rhino-Laryngology</source> <volume>280</volume>, <fpage>3157</fpage>&#x2013;<lpage>3169</lpage>. <pub-id pub-id-type="doi">10.1007/s00405-023-07820-5</pub-id>
</citation>
</ref>
<ref id="B56">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Mueller</surname>
<given-names>H. G.</given-names>
</name>
</person-group> (<year>2016</year>). <source>Signia expert series: speech-in-noise testing for selection and fitting of hearing aids: worth the effort?</source> <publisher-name>Audiology Online</publisher-name>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://www.audiologyonline.com/articles/signia-expert-series-speech-in-18336">https://www.audiologyonline.com/articles/signia-expert-series-speech-in-18336</ext-link>
</comment>.</citation>
</ref>
<ref id="B57">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Mueller</surname>
<given-names>H. G.</given-names>
</name>
<name>
<surname>Ricketts</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Hornsby</surname>
<given-names>B. Y. G.</given-names>
</name>
</person-group> (<year>2023</year>). <source>20Q: speech-in-noise testing - too useful to be ignored</source>. <publisher-name>AudiologyOnline</publisher-name>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://www.audiologyonline.com/articles/20q-speech-in-noise-testing-28760">https://www.audiologyonline.com/articles/20q-speech-in-noise-testing-28760</ext-link>
</comment>.</citation>
</ref>
<ref id="B58">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>M&#xfc;ller</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>1992</year>). <article-title>Perzeptive Analyse und Weiterentwicklung eines Reimtestverfahrens f&#xfc;r die Sprachaudiometrie</article-title>. <source>Univ. G&#xf6;ttingen</source>.</citation>
</ref>
<ref id="B59">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Murphy</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Virtual reality: the next frontier of audiology</article-title>. <source>Hear. J.</source> <volume>70</volume>, <fpage>24</fpage>&#x2013;<lpage>27</lpage>. <pub-id pub-id-type="doi">10.1097/01.HJ.0000525521.39398.8f</pub-id>
</citation>
</ref>
<ref id="B60">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Nilsson</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Soli</surname>
<given-names>S. D.</given-names>
</name>
<name>
<surname>Sullivan</surname>
<given-names>J. A.</given-names>
</name>
</person-group> (<year>1994</year>). <article-title>Development of the Hearing in Noise Test for the measurement of speech reception thresholds in quiet and in noise</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>95</volume> (<issue>2</issue>), <fpage>1085</fpage>&#x2013;<lpage>1099</lpage>. <pub-id pub-id-type="doi">10.1121/1.408469</pub-id>
</citation>
</ref>
<ref id="B61">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Niquette</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Arcaroli</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Revit</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Parkinson</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Staller</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Skinner</surname>
<given-names>M.</given-names>
</name>
<etal/>
</person-group> (<year>2003</year>). &#x201c;<article-title>Development of the BKB-SIN test</article-title>,&#x201d; in <source>Annual meeting of the American auditory society</source> (<publisher-loc>Scottsdale, AZ</publisher-loc>).</citation>
</ref>
<ref id="B62">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ooster</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Tuschen</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Meyer</surname>
<given-names>B. T.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>Self-conducted speech audiometry using automatic speech recognition: simulation results for listeners with hearing loss</article-title>. <source>Comput. Speech Lang.</source> <volume>78</volume> (<issue>June 2021</issue>), <fpage>101447</fpage>. <pub-id pub-id-type="doi">10.1016/j.csl.2022.101447</pub-id>
</citation>
</ref>
<ref id="B63">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ozimek</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Koci&#x144;ski</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Kutzner</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>S&#x119;k</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Wicher</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Speech intelligibility for different spatial configurations of target speech and competing noise source in a horizontal and median plane</article-title>. <source>Speech Commun.</source> <volume>55</volume> (<issue>10</issue>), <fpage>1021</fpage>&#x2013;<lpage>1032</lpage>. <pub-id pub-id-type="doi">10.1016/j.specom.2013.06.009</pub-id>
</citation>
</ref>
<ref id="B64">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Peissig</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Kollmeier</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>1997</year>). <article-title>Directivity of binaural noise reduction in spatial multiple noise-source arrangements for normal and impaired listeners</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>101</volume> (<issue>3</issue>), <fpage>1660</fpage>&#x2013;<lpage>1670</lpage>. <pub-id pub-id-type="doi">10.1121/1.418150</pub-id>
</citation>
</ref>
<ref id="B65">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Peng</surname>
<given-names>Z. E.</given-names>
</name>
<name>
<surname>Litovsky</surname>
<given-names>R. Y.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Novel approaches to measure spatial release from masking in children with bilateral cochlear implants</article-title>. <source>Ear Hear.</source> <volume>43</volume> (<issue>1</issue>), <fpage>101</fpage>&#x2013;<lpage>114</lpage>. <pub-id pub-id-type="doi">10.1097/AUD.0000000000001080</pub-id>
</citation>
</ref>
<ref id="B66">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Platte</surname>
<given-names>H. J.</given-names>
</name>
<name>
<surname>vom H&#xf6;vel</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>1980</year>). <article-title>Zur deutung der ergebnisse von sprachverstaendlichkeitsmessungen mit stoerschall im freifeld</article-title>. <source>Acta Acustica united Acustica</source> <volume>45</volume> (<issue>3</issue>), <fpage>139</fpage>&#x2013;<lpage>150</lpage>.</citation>
</ref>
<ref id="B67">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Plomp</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Mimpen</surname>
<given-names>A. M.</given-names>
</name>
</person-group> (<year>1981</year>). <article-title>Effect of the orientation of the speaker&#x2019;s head and the azimuth of a noise source on the speech-reception threshold for sentences</article-title>. <source>Acustica</source> <volume>48</volume> (<issue>5</issue>), <fpage>325</fpage>&#x2013;<lpage>328</lpage>.</citation>
</ref>
<ref id="B68">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rader</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Fastl</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Baumann</surname>
<given-names>U.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Speech perception with combined electric-acoustic stimulation and bilateral cochlear implants in a multisource noise field</article-title>. <source>Ear Hear.</source> <volume>34</volume> (<issue>3</issue>), <fpage>324</fpage>&#x2013;<lpage>332</lpage>. <pub-id pub-id-type="doi">10.1097/AUD.0b013e318272f189</pub-id>
</citation>
</ref>
<ref id="B69">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Reyes-Lecuona</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Picinali</surname>
<given-names>L.</given-names>
</name>
</person-group> (<year>2022</year>). <article-title>Unity wrapper for 3DTI</article-title>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://github.com/3DTune-In/3dti_AudioToolkit_UnityWrapper">https://github.com/3DTune-In/3dti_AudioToolkit_UnityWrapper</ext-link>.</comment>
</citation>
</ref>
<ref id="B70">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rouder</surname>
<given-names>J. N.</given-names>
</name>
<name>
<surname>Morey</surname>
<given-names>R. D.</given-names>
</name>
<name>
<surname>Speckman</surname>
<given-names>P. L.</given-names>
</name>
<name>
<surname>Province</surname>
<given-names>J. M.</given-names>
</name>
</person-group> (<year>2012</year>). <article-title>Default Bayes factors for ANOVA designs</article-title>. <source>J. Math. Psychol.</source> <volume>56</volume> (<issue>5</issue>), <fpage>356</fpage>&#x2013;<lpage>374</lpage>. <pub-id pub-id-type="doi">10.1016/j.jmp.2012.08.001</pub-id>
</citation>
</ref>
<ref id="B71">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rouder</surname>
<given-names>J. N.</given-names>
</name>
<name>
<surname>Speckman</surname>
<given-names>P. L.</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Morey</surname>
<given-names>R. D.</given-names>
</name>
<name>
<surname>Iverson</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Bayesian t tests for accepting and rejecting the null hypothesis</article-title>. <source>Psychonomic Bull. &#x26; Rev.</source> <volume>16</volume> (<issue>2</issue>), <fpage>225</fpage>&#x2013;<lpage>237</lpage>. <pub-id pub-id-type="doi">10.3758/PBR.16.2.225</pub-id>
</citation>
</ref>
<ref id="B72">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Roup</surname>
<given-names>C. M.</given-names>
</name>
<name>
<surname>Custer</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Powell</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>The relationship between self&#x2012;perceived hearing ability and binaural speech&#x2012;in&#x2012;noise performance in adults with normal pure&#x2012;tone hearing</article-title>. <source>Perspectives</source> <volume>6</volume> (<issue>5</issue>), <fpage>1085</fpage>&#x2013;<lpage>1096</lpage>. <pub-id pub-id-type="doi">10.1044/2021_PERSP-21-00032</pub-id>
</citation>
</ref>
<ref id="B73">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ruggles</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Bharadwaj</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Shinn-Cunningham</surname>
<given-names>B. G.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Normal hearing is not enough to guarantee robust encoding of suprathreshold features important in everyday communication</article-title>. <source>Proc. Natl. Acad. Sci. U. S. A.</source> <volume>108</volume> (<issue>37</issue>), <fpage>15516</fpage>&#x2013;<lpage>15521</lpage>. <pub-id pub-id-type="doi">10.1073/pnas.1108912108</pub-id>
</citation>
</ref>
<ref id="B74">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Soli</surname>
<given-names>S. D.</given-names>
</name>
<name>
<surname>Wong</surname>
<given-names>L. L. N.</given-names>
</name>
</person-group> (<year>2008</year>). <article-title>Assessment of speech intelligibility in noise with the hearing in noise test</article-title>. <source>Int. J. Audiology</source> <volume>47</volume> (<issue>6</issue>), <fpage>356</fpage>&#x2013;<lpage>361</lpage>. <pub-id pub-id-type="doi">10.1080/14992020801895136</pub-id>
</citation>
</ref>
<ref id="B75">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Strelcyk</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Dau</surname>
<given-names>T.</given-names>
</name>
</person-group> (<year>2009</year>). <article-title>Relations between frequency selectivity, temporal fine-structure processing, and speech reception in impaired hearing</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>125</volume> (<issue>5</issue>), <fpage>3328</fpage>&#x2013;<lpage>3345</lpage>. <pub-id pub-id-type="doi">10.1121/1.3097469</pub-id>
</citation>
</ref>
<ref id="B76">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Taylor</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2003</year>). <article-title>Speech-in-noise tests: how and why to include them in your basic test battery</article-title>. <source>Hear. J.</source> <volume>56</volume> (<issue>1</issue>), <fpage>40</fpage>&#x2013;<lpage>46</lpage>. <pub-id pub-id-type="doi">10.1097/01.HJ.0000293000.76300.ff</pub-id>
</citation>
</ref>
<ref id="B77">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tiippana</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Sams</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Puharinen</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Sound location can influence audiovisual speech perception when spatial attention is manipulated</article-title>. <source>Seeing Perceiving</source> <volume>24</volume> (<issue>1</issue>), <fpage>67</fpage>&#x2013;<lpage>90</lpage>. <pub-id pub-id-type="doi">10.1163/187847511X557308</pub-id>
</citation>
</ref>
<ref id="B78">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Valzolgher</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2024</year>). <article-title>Motor strategies: the role of active behavior in spatial hearing research</article-title>. <source>Psychol. Rep.</source> <volume>0</volume> (<issue>0</issue>), <fpage>332941241260246</fpage>&#x2013;<lpage>23</lpage>. <pub-id pub-id-type="doi">10.1177/00332941241260246</pub-id>
</citation>
</ref>
<ref id="B79">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Valzolgher</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Campus</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Rabini</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Gori</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Pavani</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Updating spatial hearing abilities through multisensory and motor cues</article-title>. <source>Cognition</source> <volume>204</volume> (<issue>November</issue>), <fpage>104409</fpage>. <pub-id pub-id-type="doi">10.1016/j.cognition.2020.104409</pub-id>
</citation>
</ref>
<ref id="B80">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wagenmakers</surname>
<given-names>E.-J.</given-names>
</name>
</person-group> (<year>2007</year>). <article-title>A practical solution to the pervasive problems of p values</article-title>. <source>Psychonomic Bull. &#x26; Rev.</source> <volume>14</volume> (<issue>5</issue>), <fpage>779</fpage>&#x2013;<lpage>804</lpage>. <pub-id pub-id-type="doi">10.3758/BF03194105</pub-id>
</citation>
</ref>
<ref id="B81">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Warzybok</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Zokoll</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Wardenga</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Ozimek</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Boboshko</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Kollmeier</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2015</year>). <article-title>Development of the Russian matrix sentence test</article-title>. <source>Int. J. Audiology</source> <volume>54</volume> (<issue>November</issue>), <fpage>35</fpage>&#x2013;<lpage>43</lpage>. <pub-id pub-id-type="doi">10.3109/14992027.2015.1020969</pub-id>
</citation>
</ref>
<ref id="B82">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Weber</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>L&#xfc;beck</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>P&#xf6;rschmann</surname>
<given-names>C.</given-names>
</name>
</person-group> (<year>2024</year>). &#x201c;<article-title>Evaluating the influence of different generic head-related transfer functions on plausibility of binaural rendering</article-title>,&#x201d; in <source>Fortschritte der Akustik &#x2013; DAGA 2024</source> (<publisher-loc>Hannover</publisher-loc>: <publisher-name>DEGA e.V. Berlin</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>5</lpage>.</citation>
</ref>
<ref id="B83">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Werner</surname>
<given-names>L. A.</given-names>
</name>
<name>
<surname>Fay</surname>
<given-names>R. R.</given-names>
</name>
<name>
<surname>Popper</surname>
<given-names>A. N.</given-names>
</name>
</person-group> (<year>2012</year>). &#x201c;<article-title>Human auditory development</article-title>,&#x201d; in <source>Springer handbook of auditory research</source>. <publisher-loc>New York, NY</publisher-loc>: <publisher-name>Springer</publisher-name>. <pub-id pub-id-type="doi">10.1007/978-1-4614-1421-6</pub-id>
</citation>
</ref>
<ref id="B84">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Westfall</surname>
<given-names>P. H.</given-names>
</name>
<name>
<surname>Johnson</surname>
<given-names>W. O.</given-names>
</name>
<name>
<surname>Utts</surname>
<given-names>J. M.</given-names>
</name>
</person-group> (<year>1997</year>). <article-title>A Bayesian perspective on the Bonferroni adjustment</article-title>. <source>Biometrika</source> <volume>84</volume> (<issue>2</issue>), <fpage>419</fpage>&#x2013;<lpage>427</lpage>. <pub-id pub-id-type="doi">10.1093/biomet/84.2.419</pub-id>
</citation>
</ref>
<ref id="B85">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Williams</surname>
<given-names>B. T.</given-names>
</name>
<name>
<surname>Viswanathan</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Brouwer</surname>
<given-names>S.</given-names>
</name>
</person-group> (<year>2023</year>). <article-title>The effect of visual speech information on linguistic release from masking</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>153</volume> (<issue>1</issue>), <fpage>602</fpage>&#x2013;<lpage>612</lpage>. <pub-id pub-id-type="doi">10.1121/10.0016865</pub-id>
</citation>
</ref>
<ref id="B86">
<citation citation-type="journal">
<collab>World Medical Association</collab> (<year>2013</year>). <article-title>World Medical Association Declaration of Helsinki: ethical principles for medical research involving human subjects</article-title>. <source>JAMA</source> <volume>310</volume> (<issue>20</issue>), <fpage>2191</fpage>&#x2013;<lpage>2194</lpage>. <pub-id pub-id-type="doi">10.1001/jama.2013.281053</pub-id>
</citation>
</ref>
<ref id="B87">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yuan</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Lleo</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Daniel</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>White</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>The impact of temporally coherent visual cues on speech perception in complex auditory environments</article-title>. <source>Front. Neurosci.</source> <volume>15</volume> (<issue>June</issue>), <fpage>678029</fpage>. <pub-id pub-id-type="doi">10.3389/fnins.2021.678029</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>