<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neurosci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neurosci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1662-453X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnins.2026.1736957</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Dynamic sensor adaptation based on efferent feedback for adaptive bio-inspired sound source localization</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" equal-contrib="yes">
<name><surname>Durstewitz</surname> <given-names>Steve</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="author-notes" rid="fn001"><sup>&#x02020;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<uri xlink:href="https://loop.frontiersin.org/people/3348851"/>
</contrib>
<contrib contrib-type="author" equal-contrib="yes">
<name><surname>Schmid</surname> <given-names>Daniel</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="author-notes" rid="fn001"><sup>&#x02020;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<uri xlink:href="https://loop.frontiersin.org/people/972200"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Oess</surname> <given-names>Timo</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<uri xlink:href="https://loop.frontiersin.org/people/362525"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Ghazanfari</surname> <given-names>Hesan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<uri xlink:href="https://loop.frontiersin.org/people/3348842"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Neumann</surname> <given-names>Heiko</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<uri xlink:href="https://loop.frontiersin.org/people/75766"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Ernst</surname> <given-names>Marc O.</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<uri xlink:href="https://loop.frontiersin.org/people/340616"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Lenk</surname> <given-names>Claudia</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<uri xlink:href="https://loop.frontiersin.org/people/927204"/>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Group of Biomedical Sensor Systems and Microsystems, Universit&#x000E4;t Ulm</institution>, <city>Ulm</city>, <country country="DE">Germany</country></aff>
<aff id="aff2"><label>2</label><institution>Institute of Neural Information Processing, Universit&#x000E4;t Ulm</institution>, <city>Ulm</city>, <country country="DE">Germany</country></aff>
<aff id="aff3"><label>3</label><institution>Applied Cognitive Psychology, Universit&#x000E4;t Ulm</institution>, <city>Ulm</city>, <country country="DE">Germany</country></aff>
<author-notes>
<corresp id="c001"><label>&#x0002A;</label>Correspondence: Claudia Lenk, <email xlink:href="mailto:claudia.lenk@uni-ulm.de">claudia.lenk@uni-ulm.de</email></corresp>
<fn fn-type="equal" id="fn001"><label>&#x02020;</label><p>These authors share first authorship</p></fn></author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-01-28">
<day>28</day>
<month>01</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>20</volume>
<elocation-id>1736957</elocation-id>
<history>
<date date-type="received">
<day>31</day>
<month>10</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>24</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>02</day>
<month>01</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2026 Durstewitz, Schmid, Oess, Ghazanfari, Neumann, Ernst and Lenk.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Durstewitz, Schmid, Oess, Ghazanfari, Neumann, Ernst and Lenk</copyright-holder>
<license>
<ali:license_ref start_date="2026-01-28">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Auditory perception and localization are fundamental tasks for many species, allowing them to detect, identify, and spatially localize sound sources in their environment. While biological systems have evolved sophisticated neural mechanisms for auditory adaptation, artificial auditory systems still struggle to match their performance, particularly in dynamic and noisy environments. Our research focuses on whether sensor adaptation, driven by efferent feedback from the processing stage to the sensory stage, can improve localization performance. Inspired by human sound source localization based on interaural level differences (ILD) and efferent feedback, the proposed neuromorphic system architecture is composed of two bio-inspired acoustic sensors connected to a neural processing stage, represented by two neurons of the medial nucleus of the trapezoid body (MNTB) and two neurons of the lateral superior olive (LSO). The LSO neuron response was analyzed in the following ways: (i) using measured sensor responses at different ILD without efferent feedback and with a fixed local feedback for each sensor measurement; (ii) simulated with synthetically generated sounds with varying ILDs for four different feedback configurations from the LSO neuron to the acoustic sensors. Results from (i) showed how the feedback tuning can be used to overcome mismatches due to fabrication tolerances between different MEMS sensors, and (ii) showed the influence of different feedback configurations and simulation parameters on the LSO neuron response with respect to different ILDs.</p></abstract>
<kwd-group>
<kwd>dynamic sensor adaptation</kwd>
<kwd>efferent feedback</kwd>
<kwd>interaural level differences</kwd>
<kwd>lateral superior olivary complex</kwd>
<kwd>neural network</kwd>
<kwd>neuromorphic computing</kwd>
<kwd>recurrent processing</kwd>
<kwd>sound source localization</kwd>
</kwd-group>
<funding-group>
<award-group id="gs1">
<funding-source id="sp1">
<institution-wrap>
<institution>Carl-Zeiss-Stiftung</institution>
<institution-id institution-id-type="doi" vocab="open-funder-registry" vocab-identifier="10.13039/open_funder_registry">10.13039/100007569</institution-id>
</institution-wrap>
</funding-source>
</award-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was funded by the Carl-Zeiss-Stiftung within the project NeuroSensEar.</funding-statement>
</funding-group>
<counts>
<fig-count count="8"/>
<table-count count="1"/>
<equation-count count="13"/>
<ref-count count="62"/>
<page-count count="17"/>
<word-count count="13020"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Neuroprosthetics</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Localizing, segregating, and identifying different sound sources in natural sound scenes are extremely difficult tasks, yet animals perform them effortlessly in their everyday lives across a wide range of environments and a varying number of sound sources (<xref ref-type="bibr" rid="B3">Bregman, 1990</xref>). The human auditory system, in particular, exhibits remarkable capabilities, enabling individuals to adapt to and focus on specific sound sources even in noisy conditions &#x02013; a phenomenon known as the &#x0201C;cocktail party effect&#x0201D; (<xref ref-type="bibr" rid="B34">McDermott, 2009</xref>; <xref ref-type="bibr" rid="B6">Cherry, 1953</xref>). This impressive ability relies on two mechanisms: first, combining the outcomes from different processing tasks, such as sound recognition and identification (e.g., speech and traffic noise), which improves speech-in-noise perception, and second, adaptation of signal sensing and processing. Machine hearing, in contrast, struggles strongly with sound-in-noise perception (<xref ref-type="bibr" rid="B33">Mawalim et al., 2024</xref>; <xref ref-type="bibr" rid="B26">Kumar et al., 2024</xref>). Thus, we present a bio-inspired approach and its neuromorphic implementation that combine adaptation and binaural sound-source localization to improve machine hearing for sound-in-noise perception.</p>
<p>Binaural hearing is a key factor in improving speech recognition in noisy environments and in correctly localizing a sound (<xref ref-type="bibr" rid="B24">Kidd et al., 2005</xref>; <xref ref-type="bibr" rid="B7">Culling and Lavandier, 2021</xref>). Particularly in the case of informational masking, in which similar sound signals are mixed, e.g., target speech with background speech, spatial release from masking by localizing the different sound sources is important for following and understanding the target signal (<xref ref-type="bibr" rid="B39">Papesh et al., 2017</xref>). Two important cues enable this localization: the interaural level difference (ILD), naturally created by the acoustic shadow of the head (see <xref ref-type="fig" rid="F1">Figure 1A</xref>), and the interaural time difference (ITD) created by the differences in arrival time of sound signals at the left and right ear. The superior olive complex (SOC), a brainstem nucleus and the first site of binaural integration, is the key structure in binaural signal integration for the computation of ILDs. Neurons in the lateral part of the SOC, the LSO, receive excitatory input from the ipsilateral cochlear nucleus and inhibitory input from the contralateral cochlear nucleus via a transmitter, the medial nucleus of the trapezoid body (MNTB), see <xref ref-type="fig" rid="F1">Figure 1B</xref>. This interaction of ipsilateral excitatory and contralateral inhibitory inputs creates a dynamic balance of ILD values encoding the location of a sound source (<xref ref-type="bibr" rid="B50">Tollin, 2003</xref>). Thereby, it is important that the sensitivity of the sensors and neuronal excitability are similar for both sides and across frequencies to enable a reasonable encoding of ILD (<xref ref-type="bibr" rid="B9">Darrow et al., 2006</xref>).</p>
<fig position="float" id="F1">
<label>Figure 1</label>
<caption><p>Efferent feedback and sound source localization. <bold>(A)</bold> Interaural level differences (ILDs), shown by the signals, due to the head shadow. <bold>(B)</bold> Schematic representation of stages of human ILD analysis and efferent feedback to the cochlea. Hair cells transduce sound waves into electrical activation on the auditory nerve. These signals pass through the cochlear nucleus (CN) and are then forwarded either to the superior olive complex (SOC) or the medial nucleus of the trapezoid body (MNTB). The SOC output is forwarded to additional brainstem areas, such as the inferior colliculus (not shown). Efferent connections (green lines) stem from the SOC and can be ipsi- or contralateral connections to the hair cells and auditory nerve fibers in the cochlea. <bold>(C)</bold> Dynamic adaptation (purple and dark red curves) due to either local feedback and modulation or efferent feedback can yield a change in sensitivity (slope of response) or ILD range with the highest response (or sensitivity). <bold>(D)</bold> Dynamic adaptation can decrease the influence of sensor differences and balance the left and right input to the SOC or higher brain stems to improve perceptual sensitivity to ILD. In this case, the adaptation is mainly driven by efferent feedback.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1736957-g0001.tif">
<alt-text content-type="machine-generated">Diagram showcasing auditory processing: (A) Sound wave propagation depicted with arcs and waveform. (B) Inner ear anatomy showing neural pathways, including hair cells, CN, SOC, and MNTB. (C) Graphs illustrating dynamic adaptation to signals, comparing neuron responses in non-adapted and adapted states. (D) Graphs on dynamic adaptation to sensor difference, with and without efferent feedback, showing variations in left-right differences across frequencies.</alt-text>
</graphic>
</fig>
<p>As a second key factor in sound-in-noise perception (e.g., the cocktail party effect), several adaptation mechanisms are implemented along the auditory processing pathway. Such adaptation mechanisms start at the sensory level, by changing the sensitivity of hair cells and neuronal spike-rate adaptation to constant sounds, and reach all the way to the cortex (<xref ref-type="bibr" rid="B57">Willmore and King, 2023</xref>; <xref ref-type="bibr" rid="B22">Khalighinejad et al., 2019</xref>; <xref ref-type="bibr" rid="B1">Angeloni et al., 2023</xref>). Adaptation is thereby driven by local information, like input and signal statistics, or by efferent feedback from downstream areas based on processing performance and goals (<xref ref-type="bibr" rid="B8">Dahmen et al., 2010</xref>; <xref ref-type="bibr" rid="B1">Angeloni et al., 2023</xref>; <xref ref-type="bibr" rid="B16">Guinan, 2018</xref>; <xref ref-type="bibr" rid="B31">Lopez-Poveda, 2018</xref>). A major area for producing such efferent feedback signals to the sensory stage, i.e., the hair cells and auditory nerve fibers in the cochlea (see green connections in <xref ref-type="fig" rid="F1">Figure 1B</xref>), is the SOC (<xref ref-type="bibr" rid="B62">Zhao et al., 2022</xref>; <xref ref-type="bibr" rid="B17">Guinan Jr, 2006</xref>; <xref ref-type="bibr" rid="B13">Frank and Goodrich, 2018</xref>). It has been shown that this feedback can change the gain and dynamic range of the sensors and neuronal responses, enhance signal contrast, and improve signal perception in background noise (<xref ref-type="bibr" rid="B16">Guinan, 2018</xref>; <xref ref-type="bibr" rid="B31">Lopez-Poveda, 2018</xref>). Furthermore, regarding sound source localization, efferent feedback is discussed to improve interaural sensitivity for ILD encoding (<xref ref-type="bibr" rid="B9">Darrow et al., 2006</xref>), as schematically shown in <xref ref-type="fig" rid="F1">Figure 1C</xref>. 
Furthermore, some brainstem areas exhibit adaptation to sound-source locations (<xref ref-type="bibr" rid="B47">Stange et al., 2013</xref>; <xref ref-type="bibr" rid="B15">Gleiss et al., 2019</xref>; <xref ref-type="bibr" rid="B28">Lingner et al., 2018</xref>), enabling better spatial hearing. For example, adaptation in dependence of the mean or variance of the ILD signal distribution is observed, combined with a shifting of the mean of the ILD response or the perceptual sensitivity (<xref ref-type="bibr" rid="B8">Dahmen et al., 2010</xref>) (see <xref ref-type="fig" rid="F1">Figure 1D</xref>). The latter refers to balancing the left and right inputs to the brain stem area to improve sensitivity.</p>
<p>Efferent feedback connections can be ipsilateral as well as contralateral, as indicated by the green lines in <xref ref-type="fig" rid="F1">Figure 1B</xref>, and the feedback signals can be excitatory, inhibitory, or modulatory (<xref ref-type="bibr" rid="B16">Guinan, 2018</xref>; <xref ref-type="bibr" rid="B31">Lopez-Poveda, 2018</xref>). While the topology and signal types are well resolved, the underlying mechanisms of efferent feedback and how it can improve hearing, e.g., for speech-in-noise or sound source localization, remain under debate. Furthermore, only a few models exist, describing the mechanisms of efferent feedback (<xref ref-type="bibr" rid="B12">Farhadi et al., 2023</xref>). Since efferent feedback directly tunes the sensory input to the following stages, it is particularly unclear how it affects different processing tasks and stages. For example, if efferent feedback is excited by noise to improve sound-in-noise perception, does this change in sensory properties affect the performance of the sound source localization stage at the same time?</p>
<p>While biological systems have evolved sophisticated mechanisms for robust, efficient, and real-time sound perception, particularly in dynamic and noisy environments, artificial auditory systems still struggle to match their performance (<xref ref-type="bibr" rid="B40">Patman and Chodroff, 2024</xref>). Integrating bio-inspired algorithms and using hardware-based, neuromorphic implementations can improve sound-in-noise recognition and increase the system&#x00027;s efficiency (<xref ref-type="bibr" rid="B2">Araujo et al., 2020</xref>; <xref ref-type="bibr" rid="B29">Liu et al., 2014</xref>). In this context, artificial cochleae were developed to replicate biological hearing processes. Such bio-inspired devices utilize nonlinear filtering and frequency decomposition techniques to improve the perception of speech and environmental sounds as well as efficiently encode important sound features such as amplitude or frequency (<xref ref-type="bibr" rid="B52">van Schaik and Liu, 2005</xref>; <xref ref-type="bibr" rid="B19">Jim&#x000E9;nez-Fernandez et al., 2017</xref>; <xref ref-type="bibr" rid="B54">Wang et al., 2015</xref>; <xref ref-type="bibr" rid="B29">Liu et al., 2014</xref>; <xref ref-type="bibr" rid="B60">Yang et al., 2016</xref>; <xref ref-type="bibr" rid="B18">Hamilton et al., 2008</xref>; <xref ref-type="bibr" rid="B49">Thakur et al., 2014</xref>; <xref ref-type="bibr" rid="B59">Xu et al., 2018b</xref>; <xref ref-type="bibr" rid="B46">Singh et al., 2019</xref>; <xref ref-type="bibr" rid="B36">Nouri et al., 2015</xref>). Furthermore, neuromorphic implementations of sound source localization algorithms have been developed (<xref ref-type="bibr" rid="B38">Oess et al., 2020b</xref>; <xref ref-type="bibr" rid="B45">Schoepe et al., 2023</xref>; <xref ref-type="bibr" rid="B44">Schmid et al., 2023</xref>) that analyze and encode ILDs or ITDs efficiently and in real time. 
Combining artificial cochlea and ILD/ITD analysis with phonotaxis, i.e., turning a robotic head or body movement towards a sound source, can be efficiently implemented (<xref ref-type="bibr" rid="B45">Schoepe et al., 2023</xref>; <xref ref-type="bibr" rid="B42">Reeve et al., 2005</xref>), which in turn can help improve sound perception in noisy environments.</p>
<p>In some of these systems, adaptation was introduced to adjust gain, dynamic range, or sensitivity, see, e.g., (<xref ref-type="bibr" rid="B25">Kiselev and Liu, 2021</xref>), which can improve localization (<xref ref-type="bibr" rid="B37">Oess et al., 2020a</xref>). However, existing systems often exhibit limited adaptability, rendering them susceptible to variations in signal-to-noise ratio and dynamic acoustic conditions. In particular, the sensing element, i.e., the microphone, is typically not adaptable, and thus its sensitivity cannot be tuned. Recently, we introduced an adaptive microelectromechanical system (MEMS) cochlea (<xref ref-type="bibr" rid="B27">Lenk et al., 2023</xref>) that comprises artificial hair cells obtained via MEMS-based transducers in combination with electronic feedback. Thus, this artificial cochlea can mimic the frequency selectivity, adaptability, and dynamic range of its biological counterpart. Tuning the feedback parameters can enhance the response to low-signal-to-noise conditions, increase the dynamic range, and highlight/extract important sound features such as sound onset (<xref ref-type="bibr" rid="B11">Durstewitz et al., 2022</xref>; <xref ref-type="bibr" rid="B53">Ved et al., 2024</xref>). All the above-mentioned implemented adaptations are based on signal statistics and do not include information from downstream processes, unlike efferent feedback. Nevertheless, implementing a sound coding strategy based on the medial olivocochlear reflex, which is part of the efferent system, in cochlear implants can improve speech intelligibility in the presence of speech maskers (<xref ref-type="bibr" rid="B32">Lopez-Poveda et al., 2017</xref>).</p>
<p>Our goal is to integrate efferent feedback into a neuromorphic sound source localization system to study the effect of sensor adaptation driven by different types and topologies of efferent feedback on the bio-inspired detection of ILD. By incorporating such an adaptive element, we aim to replicate the adaptivity and efficiency of biological auditory processing, particularly for sound source localization, and to improve the long-term performance of neuromorphic hearing devices. To achieve this, our bio-inspired architecture couples an artificial cochlea with adaptive feedback from lateral superior olive (LSO) neurons, enabling dynamic, real-time optimization of sound source localization (see <xref ref-type="fig" rid="F1">Figure 1</xref>). In this approach, the MEMS cochlea (<xref ref-type="bibr" rid="B27">Lenk et al., 2023</xref>) is combined with a computational LSO model (<xref ref-type="bibr" rid="B37">Oess et al., 2020a</xref>). The model, composed of conductance-based neurons, integrates excitatory input from the ipsilateral ear with inhibitory input from the contralateral ear to encode ILD cues. Crucially, the neuron model can be deployed directly on neuromorphic hardware (<xref ref-type="bibr" rid="B44">Schmid et al., 2023</xref>), making the combination with the neuromorphic cochlea an architecture inherently suited for energy-efficient, embedded auditory sensing. The newly implemented adaptive feedback loop forms a bidirectional mechanism where LSO neuronal activity influences the tuning of the MEMS cochlea, which in turn influences LSO activity. By closing the loop between cochlear sensing and LSO-driven adaptation, the system mimics key aspects of the biological auditory pathway, enabling dynamic adjustments in response to changing acoustic environments. 
In the following study, we first explore whether efferent feedback from LSO neurons to the artificial cochlea can be used to tune the cochlea&#x00027;s sensitivity and the LSO neuron&#x00027;s response, thus enabling a wider perceptual range of input. Second, we study whether the efferent feedback in this system can help overcome sensor differences, e.g., due to fabrication tolerances, thereby balancing the interaural sensitivities. Finally, we demonstrate that gain adaptation can lead to temporal encoding of inputs in LSO neurons. This has two beneficial consequences: first, the system is highly responsive to onsets of sounds, thus enabling fast reactions to stimuli, and second, the time course of the response can be used as additional encoding of the ILD, if the sensitivity of the LSO neuron is not high enough. This allows the system to maintain a wide range of input sensitivity, enabling quick reaction to a stimulus while also allowing high precision in stimulus direction when necessary.</p>
<p>In the following section, we present the system architecture and the different types and topologies of efferent feedback. Then, the ILD analysis by the system is described. In Section 3, the results from measurements and simulations of the system with and without efferent feedback will be presented and discussed. Finally, we will present our conclusions from this study.</p>
</sec>
<sec id="s2">
<label>2</label>
<title>Methods</title>
<sec>
<label>2.1</label>
<title>System setup</title>
<sec>
<label>2.1.1</label>
<title>Architectural overview</title>
<p>Inspired by the sound source localization of humans using ILDs, the system implemented here (<xref ref-type="fig" rid="F2">Figure 2</xref>) detects ILD from sound sources by a two-stage processing: (i) speaker input is fed to two bio-inspired acoustic sensors, modeling the left and right ear input and cochlear processing. (ii) Then, a subsequent neural processing stage combines the sensing outputs from both sides and forms an ILD estimate. Both parts, the bio-inspired sensors and the neural processing stage, are explained in detail in the following Subsections 2.1.2 and 2.1.3, respectively. Finally, the output of the neuronal processing is used to drive the feedback to the sensor stage to model efferent feedback. The different feedback algorithms are detailed in Section 2.1.4. How the system is investigated for ILD analysis within different experimental settings is described in Section 2.2.</p>
<fig position="float" id="F2">
<label>Figure 2</label>
<caption><p>System architecture overview. Simplified architectural overview of the simulation system with two speakers, two acoustic sensors (left and right), and one neural processing stage. Each acoustic sensor consists of a MEMS sensor with pre-amplification, an envelope generator connected to the neural processing stage, and feedback to the sensor based on amplification factor <italic>a</italic><sub><italic>f</italic></sub>(<italic>t</italic>) and bias offset <italic>u</italic><sub><italic>dc</italic></sub>, as well as the input from neural processing <italic>FB</italic><sub><italic>i</italic></sub> and a feedback limiter. The neural processing stage consists of MNTB and LSO neurons with membrane potential (<italic>q, r</italic>) and activation function (<italic>g</italic>, &#x003C3;). Their input is scaled and is only excitatory for the MNTB neuron, and excitatory and inhibitory for the LSO neuron. &#x003C3; is the final LSO output of the system.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1736957-g0002.tif">
<alt-text content-type="machine-generated">Diagram of a neural processing system involving left and right acoustic sensors. Each sensor uses an envelope generator to generate output and receives feedback that is fed to a limiter before entering back into the MEMS sensor. Inputs from both sensors flow to a neural processing stage with neurons labeled MNTB and LSO per left and right hemisphere. Left sensor output is depicted in orange color, right sensor output is depicted in green color. MNTB neurons receive input from the opposite sensor and provide inhibitory input to LSO neurons. LSO neurons receive excitatory input from the sensor on the same side. Excitatory input is depicted by diamond arrow heads, inhibitory input is depicted by round arrow heads.</alt-text>
</graphic>
</fig>
</sec>
<sec>
<label>2.1.2</label>
<title>Acoustic sensor</title>
<p>Each acoustic sensor consists of a MEMS cantilever with pre-amplification electronics, an envelope generator, and feedback to the sensor. The MEMS sensor is a silicon cantilever with an integrated piezo-resistive readout to transduce the beam bending due to sound excitation into a voltage signal <italic>u</italic><sub><italic>s</italic></sub>(<italic>t</italic>). Due to its resonant operation, each MEMS cantilever is mostly responsive to its resonance frequency and thus acts as a band-pass filter with linear transfer characteristics. Its resonance frequency depends on the geometric dimensions of the cantilever. Furthermore, an aluminum heater is integrated into the MEMS as a thermomechanical actuator. This allows actuating the cantilever and introducing additional beam bending, which enables tuning of the sensor (<xref ref-type="bibr" rid="B41">Rangelow et al., 2017</xref>; <xref ref-type="bibr" rid="B27">Lenk et al., 2023</xref>). Using a simple feedback mechanism</p>
<disp-formula id="EQ1"><mml:math id="M1"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>u</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x000B7;</mml:mo><mml:msub><mml:mrow><mml:mi>u</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>u</mml:mi></mml:mrow><mml:mrow><mml:mi>d</mml:mi><mml:mi>c</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(1)</label></disp-formula>
<p>consisting of the amplified, high-pass filtered sensor response <italic>u</italic><sub><italic>ac</italic></sub>(<italic>t</italic>) and a bias voltage <italic>u</italic><sub><italic>dc</italic></sub>, tuning of the sensitivity, bandwidth, and linearity of the cantilever&#x00027;s response is possible (<xref ref-type="bibr" rid="B53">Ved et al., 2024</xref>). Particularly, if <italic>a</italic><sub><italic>f</italic></sub>(<italic>t</italic>) is tuned close to a critical point <italic>a</italic><sub><italic>crit</italic></sub>, the cantilever behaves like a small-signal amplifier with compressive transfer characteristics. At <italic>a</italic><sub><italic>crit</italic></sub> the system undergoes a Hopf bifurcation leading to autonomous oscillations (<xref ref-type="bibr" rid="B20">Johann Rolf and Meurer, 2023</xref>). <italic>a</italic><sub><italic>crit</italic></sub> can be analytically calculated using the system parameters like filter time constants and transfer factor (<xref ref-type="bibr" rid="B27">Lenk et al., 2023</xref>; <xref ref-type="bibr" rid="B20">Johann Rolf and Meurer, 2023</xref>). This tunability can be used to extract sound features, such as on- and offset (<xref ref-type="bibr" rid="B11">Durstewitz et al., 2022</xref>, <xref ref-type="bibr" rid="B10">2024</xref>) and, in conjunction with the frequency decomposition, for signal pre-processing. The latter helps to improve speech processing in noisy conditions (<xref ref-type="bibr" rid="B21">Johny et al., 2024</xref>).</p>
<p>The model description of the cantilever&#x00027;s response is based on the Euler-Bernoulli beam theory and was derived in <xref ref-type="bibr" rid="B43">Roeser et al. (2016)</xref>; <xref ref-type="bibr" rid="B27">Lenk et al. (2023)</xref>. In this mass-normalized model equation (terms are expressed in units of acceleration <inline-formula><mml:math id="M2"><mml:mfrac><mml:mrow><mml:mtext>m</mml:mtext></mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mtext>s</mml:mtext></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mfrac></mml:math></inline-formula>), the deflection <italic>x</italic>(<italic>t</italic>) of the free end of the cantilever due to the thermal-mechanical actuation &#x003B1;<sub><italic>s</italic></sub>&#x003B8;(<italic>t</italic>) and an external force <inline-formula><mml:math id="M3"><mml:msub><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mo>&#x0007E;</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>e</mml:mi><mml:mi>x</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula> generated by the sound level can be described by the following second-order ordinary differential equation (ODE):</p>
<disp-formula id="EQ2"><mml:math id="M4"><mml:mrow><mml:mover accent='true'><mml:mi>x</mml:mi><mml:mo>&#x000A8;</mml:mo></mml:mover><mml:mo stretchy='false'>(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>+</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mi>&#x003C9;</mml:mi><mml:mn>0</mml:mn></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mi>Q</mml:mi><mml:mn>0</mml:mn></mml:msub></mml:mrow></mml:mfrac><mml:mover accent='true'><mml:mi>x</mml:mi><mml:mo>&#x002D9;</mml:mo></mml:mover><mml:mo stretchy='false'>(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>+</mml:mo><mml:msubsup><mml:mi>&#x003C9;</mml:mi><mml:mn>0</mml:mn><mml:mn>2</mml:mn></mml:msubsup><mml:mi>x</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>=</mml:mo><mml:msub><mml:mi>&#x003B1;</mml:mi><mml:mi>s</mml:mi></mml:msub><mml:mi>&#x003B8;</mml:mi><mml:mo stretchy='false'>(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>+</mml:mo><mml:msub><mml:mover accent='true'><mml:mi>F</mml:mi><mml:mo>&#x002DC;</mml:mo></mml:mover><mml:mrow><mml:mi>e</mml:mi><mml:mi>x</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo stretchy='false'>(</mml:mo><mml:mi>t</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow></mml:math><label>(2)</label></disp-formula>
<p>where &#x003C9;<sub>0</sub> &#x0003D; 2&#x003C0;&#x000B7;<italic>f</italic> with the resonance frequency <italic>f</italic>, <italic>Q</italic><sub>0</sub> denotes the quality factor, &#x003B1;<sub><italic>s</italic></sub> is the transfer factor from temperature to deflection, and &#x003B8;(<italic>t</italic>) is the temperature difference between the beam and its ambient temperature.</p>
<p>The change in the temperature difference is caused by the actuation and can be written as</p>
<disp-formula id="EQ3"><mml:math id="M5"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mover accent="true"><mml:mrow><mml:mi>&#x003B8;</mml:mi></mml:mrow><mml:mo>.</mml:mo></mml:mover><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003B2;</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub><mml:mi>&#x003B8;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003B3;</mml:mi></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mo class="qopname">tanh</mml:mo><mml:msub><mml:mrow><mml:mi>u</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>R</mml:mi></mml:mrow></mml:mfrac></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(3)</label></disp-formula>
<p>with the time constant &#x003B2;<sub><italic>s</italic></sub>, the transfer factor from voltage to temperature &#x003B3;<sub><italic>s</italic></sub>, and the applied actuation voltage <italic>u</italic><sub><italic>act</italic></sub>(<italic>t</italic>), limited by tanh, to the thermo-mechanical heater with the heater resistance <italic>R</italic>. The electrical output of the cantilever <italic>u</italic><sub><italic>s</italic></sub>(<italic>t</italic>) &#x0003D; &#x003BA;<sub><italic>s</italic></sub>&#x000B7;<italic>x</italic>(<italic>t</italic>), determined by the deflection <italic>x</italic>(<italic>t</italic>) and a factor &#x003BA;<sub><italic>s</italic></sub> due to the piezo-resistive elements and pre-amplification, is high-pass filtered</p>
<disp-formula id="EQ4"><mml:math id="M6"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mi>u</mml:mi></mml:mrow><mml:mo>.</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mrow><mml:mi>u</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mrow><mml:mi>h</mml:mi><mml:mi>p</mml:mi><mml:mi>f</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfrac><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mi>u</mml:mi></mml:mrow><mml:mo>.</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>s</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(4)</label></disp-formula>
<p>with the time constant &#x003C4;<sub><italic>hpf</italic></sub> to remove the static beam deflection. Next, the upper envelope <italic>u</italic><sub><italic>env</italic></sub> is extracted by rectifying and low-pass filtering</p>
<disp-formula id="EQ5"><mml:math id="M7"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mrow><mml:mi>l</mml:mi><mml:mi>p</mml:mi><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mover accent="true"><mml:mrow><mml:mi>u</mml:mi></mml:mrow><mml:mo>.</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:mi>v</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>u</mml:mi></mml:mrow><mml:mrow><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:mi>v</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mi>u</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>|</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(5)</label></disp-formula>
<p>with time constant &#x003C4;<sub><italic>lpf</italic></sub>. This <italic>u</italic><sub><italic>env</italic></sub>(<italic>t</italic>) is the output of the sensing stage and the input to the neural processing stage. The high- and low-pass filter equations are derived by applying Kirchhoff&#x00027;s laws to simple resistor-capacitor circuits and can be used for hardware-based implementations of the filter functions. The filtered beam response <italic>u</italic><sub><italic>ac</italic></sub>(<italic>t</italic>) is further used for feedback to the sensor by generating the actuation signal <italic>u</italic><sub><italic>act</italic></sub>(<italic>t</italic>) as given by Equation 1. Here, depending on the experimental setting (described in Section 2.2) <italic>a</italic><sub><italic>f</italic></sub>(<italic>t</italic>) is either a constant value or given by the input <italic>FB</italic><sub><italic>i</italic></sub> from the neural processing stage to model the efferent feedback. This is described in detail in Section 2.1.4.</p>
</sec>
<sec>
<label>2.1.3</label>
<title>Neural processing</title>
<p>The architecture of the neural processing stage comprises LSO and MNTB neurons and is based on the model in <xref ref-type="bibr" rid="B37">Oess et al. (2020a)</xref>. This model captures the main temporal dynamics of how a circuit of representative biological LSO and MNTB neurons computes their membrane potential and firing rate activity and is based on experimental evidence. To clarify how the MNTB and LSO neurons contribute to extracting ILDs from the envelope of the sensor output <italic>u</italic><sub><italic>env</italic></sub>, the underlying mathematical formulations are presented below. The model utilizes single-compartment conductance-based equations to describe the evolution of the neurons&#x00027; membrane potentials.</p>
<p>The state of the MNTB neuron is described by its membrane potential <italic>q</italic> and evolves according to</p>
<disp-formula id="EQ6"><mml:math id="M8"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mrow><mml:mi>q</mml:mi></mml:mrow></mml:msub><mml:mover accent="true"><mml:mrow><mml:mi>q</mml:mi></mml:mrow><mml:mo>.</mml:mo></mml:mover><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003B1;</mml:mi></mml:mrow><mml:mrow><mml:mi>q</mml:mi></mml:mrow></mml:msub><mml:mi>q</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003B2;</mml:mi></mml:mrow><mml:mrow><mml:mi>q</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>s</mml:mi></mml:mrow><mml:mrow><mml:mi>q</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(6)</label></disp-formula>
<p>with the time constant &#x003C4;<sub><italic>q</italic></sub>, the leakage part &#x003B1;<sub><italic>q</italic></sub><italic>q</italic> with the decay rate &#x003B1;<sub><italic>q</italic></sub> and the excitatory input obtained from the scaled sensor response <italic>s</italic><sub><italic>q</italic></sub>(<italic>t</italic>) &#x0003D; <italic>u</italic><sub><italic>env</italic></sub>(<italic>t</italic>)&#x000B7;<italic>w</italic><sub><italic>mntb, exc</italic></sub> and an additional scaling factor &#x003B2;<sub><italic>q</italic></sub>. The firing rate of the MNTB neuron is computed from the membrane potential by</p>
<disp-formula id="EQ7"><mml:math id="M9"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>g</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>q</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mi>q</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mo>&#x0002B;</mml:mo></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mo class="qopname">max</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>q</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(7)</label></disp-formula>
<p>i.e., using a half-wave rectification as an activation function. The LSO neuron membrane potential <italic>r</italic> evolves according to a conductance-based model description.</p>
<disp-formula id="EQ8"><mml:math id="M10"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mrow><mml:mi>r</mml:mi></mml:mrow></mml:msub><mml:mover accent="true"><mml:mrow><mml:mi>r</mml:mi></mml:mrow><mml:mo>.</mml:mo></mml:mover><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003B1;</mml:mi></mml:mrow><mml:mrow><mml:mi>r</mml:mi></mml:mrow></mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003B2;</mml:mi></mml:mrow><mml:mrow><mml:mi>r</mml:mi></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:mi>r</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:msub><mml:mrow><mml:mi>s</mml:mi></mml:mrow><mml:mrow><mml:mi>r</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003B3;</mml:mi></mml:mrow><mml:mrow><mml:mi>r</mml:mi></mml:mrow></mml:msub><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003BA;</mml:mi></mml:mrow><mml:mrow><mml:mi>r</mml:mi></mml:mrow></mml:msub><mml:mi>r</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mi>g</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>q</mml:mi><mml:mrow><mml:mo 
stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(8)</label></disp-formula>
<p>Here, the rate of change is the sum of three terms: a decay term &#x02212;&#x003B1;<sub><italic>r</italic></sub><italic>r</italic>, an excitatory term (&#x003B2;<sub><italic>r</italic></sub>&#x02212;<italic>r</italic>(<italic>t</italic>))<italic>s</italic><sub><italic>r</italic></sub>(<italic>t</italic>) and an inhibitory term &#x02212;(&#x003B3;<sub><italic>r</italic></sub>&#x0002B;&#x003BA;<sub><italic>r</italic></sub><italic>r</italic>(<italic>t</italic>))<italic>g</italic>(<italic>q</italic>(<italic>t</italic>)). The decay term with the decay rate &#x003B1;<sub><italic>r</italic></sub> leads to an exponential decay toward a resting potential of 0 in the case where no excitatory or inhibitory input is present. The excitatory input <italic>s</italic><sub><italic>r</italic></sub>(<italic>t</italic>) &#x0003D; <italic>u</italic><sub><italic>env</italic></sub>(<italic>t</italic>)&#x000B7;<italic>w</italic><sub><italic>lso, exc</italic></sub> comes from the sensor envelope and is constrained by the force term (&#x003B2;<sub><italic>r</italic></sub>&#x02212;<italic>r</italic>(<italic>t</italic>)) with an upper saturation level described by the reversal potential &#x003B2;<sub><italic>r</italic></sub>. The inhibitory input <italic>g</italic>(<italic>q</italic>(<italic>t</italic>)) is the output of the MNTB neuron and is constrained by the force term &#x02212;&#x003B3;<sub><italic>r</italic></sub>&#x02212;&#x003BA;<sub><italic>r</italic></sub><italic>r</italic>(<italic>t</italic>). 
This term bounds the membrane potential by a lower saturation level described by a reversal potential of <inline-formula><mml:math id="M11"><mml:mo>-</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003B3;</mml:mi></mml:mrow><mml:mrow><mml:mi>r</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003BA;</mml:mi></mml:mrow><mml:mrow><mml:mi>r</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfrac></mml:math></inline-formula> with the parameters &#x003B3;<sub><italic>r</italic></sub> for subtractive- and &#x003BA;<sub><italic>r</italic></sub> for divisive-type of neural inhibition. Finally, the firing rate of the LSO neuron is obtained from the sigmoidal activation function &#x003C3;(<italic>r</italic>(<italic>t</italic>))</p>
<disp-formula id="EQ9"><mml:math id="M12"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>&#x003C3;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>r</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#x0002B;</mml:mo><mml:mo class="qopname">exp</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>r</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003B2;</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x003C3;</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x000B7;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003B1;</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x003C3;</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mfrac></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(9)</label></disp-formula>
<p>with the parameters &#x003B1;<sub>&#x003C3;</sub> and &#x003B2;<sub>&#x003C3;</sub> for shaping the steepness and offset of the sigmoid, respectively.</p>
</sec>
<sec>
<label>2.1.4</label>
<title>Efferent feedback configurations</title>
<p>As previously mentioned, efferent feedback modulates the gain, dynamic range, and shifts the mean of the ILD response. Inspired by this mechanism used for adaptation, the output of the neuronal processing stage is used to change the feedback to the sensor, as indicated in <xref ref-type="fig" rid="F2">Figure 2</xref>. Different configurations are conceivable. Here, we focus on four configurations that (i) cover a broad range of possible system interactions (positive versus negative feedback loops and same-sided versus opposite-sided projections), and (ii) can be easily and efficiently realized in hardware. The four configurations (<xref ref-type="fig" rid="F3">Figures 3B</xref>&#x02013;<xref ref-type="fig" rid="F3">E</xref>) are determined by in- and outputs of the interface shown in <xref ref-type="fig" rid="F3">Figure 3A</xref>. Based on the sign of the feedback, two configurations form a positive feedback loop (B,C), while the other two describe a negative feedback (D,E). The projection side defines another discrimination criterion. Here, ipsilateral refers to feedback from the LSO neuron to the sensor on the same side, i.e., left-to-left side or vice versa. Contralateral is the feedback from the LSO neuron to the sensor on the opposite side.</p>
<fig position="float" id="F3">
<label>Figure 3</label>
<caption><p>Implemented feedback configurations from the LSO-MNTB subsystem onto the acoustic sensors. <bold>(A)</bold> Simplified system interface representation with feedback to the left and right acoustic sensor (<italic>FB</italic><sub><italic>left, j</italic></sub>, <italic>FB</italic><sub><italic>right, j</italic></sub>), outputs of the sensors to the neural processing stage and outputs from the neural processing stage (&#x003C3;(<italic>r</italic>)<sub><italic>left</italic></sub>, &#x003C3;(<italic>r</italic>)<sub><italic>right</italic></sub>). <bold>(B&#x02013;E)</bold> Implemented feedback configurations from LSO neuron to acoustic sensor: a positive (non-inverted) feedback to the sensor on the ipsilateral side in <bold>(B)</bold> and on the contralateral side in <bold>(C)</bold>, negative (inverted) feedback to the sensor on the ipsilateral side in <bold>(D)</bold> and on the contralateral side in <bold>(E)</bold>.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1736957-g0003.tif">
<alt-text content-type="machine-generated">Diagram of a neural processing system with left and right sensors. Arrows indicate signals between sensors and neural processing. Feedback configurations (B) to (E) show various pathways in dashed orange boxes.</alt-text>
</graphic>
</fig>
<p>These feedback configurations are implemented according to</p>
<disp-formula id="EQ10"><mml:math id="M13"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>f</mml:mi><mml:mo>,</mml:mo><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mi>v</mml:mi></mml:mrow></mml:msub><mml:mo>&#x000B7;</mml:mo><mml:mi>m</mml:mi><mml:mi>i</mml:mi><mml:mi>n</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>F</mml:mi><mml:msub><mml:mrow><mml:mi>B</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi><mml:mi>a</mml:mi><mml:mi>x</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x000B7;</mml:mo><mml:msub><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>f</mml:mi><mml:mo>,</mml:mo><mml:mi>c</mml:mi><mml:mi>r</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(10)</label></disp-formula>
<p>with the feedback strength <italic>FB</italic><sub><italic>i, j</italic></sub>&#x02208;[0, 1] given by the output &#x003C3;(<italic>r</italic>) of the neuronal processing stage as</p>
<disp-formula id="EQ11"><mml:math id="M14"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>F</mml:mi><mml:msub><mml:mrow><mml:mi>B</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003C3;</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>r</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(11)</label></disp-formula>
<p>Here, <italic>i, j</italic> denote the sides of the sensor and the LSO neuron, respectively, i.e., left or right. Feedback connections with <italic>i</italic> &#x0003D; <italic>j</italic> are ipsilateral projections, and <italic>i</italic> &#x02260; <italic>j</italic> are contralateral projections. To obtain positive or negative feedback, the inversion factor <italic>k</italic><sub><italic>inv</italic></sub> &#x0003D; &#x000B1;1 was introduced. Thus, non-inverted feedback refers to positive feedback. The sensor feedback strength <italic>a</italic><sub><italic>f</italic></sub>(<italic>t</italic>) is given in relation to the critical point <italic>a</italic><sub><italic>f, crit</italic></sub> as</p>
<disp-formula id="EQ12"><mml:math id="M15"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>f</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>k</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x000B7;</mml:mo><mml:msub><mml:mrow><mml:mi>a</mml:mi></mml:mrow><mml:mrow><mml:mi>f</mml:mi><mml:mo>,</mml:mo><mml:mi>c</mml:mi><mml:mi>r</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(12)</label></disp-formula>
<p>This relationship defines the linearity of the dynamics and the increase in gain. The larger the gain factor <italic>k</italic><sub><italic>a</italic></sub>(<italic>t</italic>), the larger the gain of the sensor, and for <italic>k</italic><sub><italic>a</italic></sub>(<italic>t</italic>)&#x0003E;0.9, nonlinear dynamics can be observed, as described in Section 2.1.2 and <xref ref-type="bibr" rid="B27">Lenk et al. (2023)</xref>; <xref ref-type="bibr" rid="B53">Ved et al. (2024)</xref>. Finally, using the <italic>min</italic> function, <italic>a</italic><sub><italic>f</italic></sub>(<italic>t</italic>) is restricted to &#x02264; <italic>k</italic><sub><italic>max</italic></sub>&#x000B7;<italic>a</italic><sub><italic>f, crit</italic></sub> to avoid unwanted oscillations due to the efferent feedback and additional nonlinear effects. A value of <italic>k</italic><sub><italic>max</italic></sub> &#x0003D; 0.95 was obtained empirically from the simulations.</p>
</sec>
</sec>
<sec>
<label>2.2</label>
<title>Experimental setups for ILD analysis</title>
<p>After describing the three parts of the implemented system in the previous sections, we will now introduce the experimental settings used to study the system&#x00027;s performance for ILD analysis. These experiments are intended to obtain a general understanding of the range of possible system behaviors. Thereby, we focus on two relevant sources of influence:</p>
<list list-type="bullet">
<list-item><p><bold>Influence of sensor responses</bold>. ILD analysis by the neural processing stage is directly influenced by the acoustic sensors&#x00027; response properties. We particularly investigate the influence arising from different, constant feedback strengths <italic>a</italic><sub><italic>f</italic></sub>(<italic>t</italic>) &#x0003D; <italic>a</italic><sub><italic>f, i</italic></sub> &#x0003D; <italic>const</italic> and differences in the sensor properties on the left and right sides due to, e.g., fabrication tolerances.</p></list-item>
<list-item><p><bold>Influence of efferent feedback configurations</bold>. Different efferent feedback mechanisms between the neural processing stage and the acoustic sensors are conceivable. We investigate the influence of such feedback configurations on the steady-state and maximal responses of LSO neurons and on the temporal evolution of the LSO response.</p></list-item>
</list>
<p>The experimental setting for studying the sensor influence is described in Section 2.2.1 and the corresponding results are given in Section 3.1. The experiments for the investigation of different efferent feedback configurations are presented in Section 2.2.2 and their corresponding results in Section 3.2. How these results are obtained from the outputs of the LSO neurons and the sensors is detailed in Section 2.2.3. The experiments in all cases are based on simulations of the models described above for the neuronal processing stage and utilize either measured or modeled sensor responses to sound. All models and analyses have been implemented in Matlab<sup>TM</sup> 2024a, and differential equations are solved with ode15s, a multistep solver based on the numerical differentiation formulas (NDFs). The simulation parameters are presented in <xref ref-type="table" rid="T1">Table 1</xref>.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Parameters for simulations described in Sections 2.2.1 and 2.2.2.</p></caption>
<table frame="box" rules="all">
<thead>
<tr>
<th valign="top" align="left"><bold>Parameter</bold></th>
<th valign="top" align="left"><bold>Value</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left" colspan="2"><bold>Acoustic sensor</bold></td>
</tr>
<tr>
<td valign="top" align="left">Resonance frequency, <italic>f</italic><sub>0</sub></td>
<td valign="top" align="left">1&#x000B7;10<sup>3</sup> Hz</td>
</tr>
<tr>
<td valign="top" align="left">Quality factor, <italic>Q</italic><sub>0</sub></td>
<td valign="top" align="left">50</td>
</tr>
<tr>
<td valign="top" align="left">Transfer factor, &#x003B1;<sub><italic>s</italic></sub></td>
<td valign="top" align="left">19.2 m s<sup>-2</sup>K<sup>-1</sup></td>
</tr>
<tr>
<td valign="top" align="left">Time constant, &#x003B2;<sub><italic>s</italic></sub></td>
<td valign="top" align="left">1.0066&#x000B7;10<sup>3</sup> s<sup>-1</sup></td>
</tr>
<tr>
<td valign="top" align="left">Transfer factor, &#x003B3;<sub><italic>s</italic></sub></td>
<td valign="top" align="left">16.2&#x000B7;10<sup>6</sup> K&#x003A9;<sup>2</sup>s<sup>-1</sup>V<sup>-2</sup></td>
</tr>
<tr>
<td valign="top" align="left">Heater resistance, <italic>R</italic></td>
<td valign="top" align="left">15 &#x003A9;</td>
</tr>
<tr>
<td valign="top" align="left">Piezo and Amplification factor, <italic>k</italic><sub><italic>p</italic></sub></td>
<td valign="top" align="left">0.602&#x000B7;10<sup>6</sup> Vm<sup>-1</sup></td>
</tr>
<tr>
<td valign="top" align="left">Time constant of high-pass filter, &#x003C4;<sub><italic>hpf</italic></sub></td>
<td valign="top" align="left">1&#x000B7;10<sup>&#x02212;3</sup> s</td>
</tr>
<tr>
<td valign="top" align="left">Feedback offset, <italic>u</italic><sub><italic>dc</italic></sub></td>
<td valign="top" align="left">&#x02212;100&#x000B7;10<sup>&#x02212;3</sup> V</td>
</tr>
<tr>
<td valign="top" align="left">Critical feedback factor, <italic>a</italic><sub><italic>f, crit</italic></sub></td>
<td valign="top" align="left">0.03236</td>
</tr>
<tr>
<td valign="top" align="left">Time constant of low-pass filter, &#x003C4;<sub><italic>lpf</italic></sub></td>
<td valign="top" align="left">5&#x000B7;10<sup>&#x02212;3</sup> s</td>
</tr>
<tr>
<td valign="top" align="left" colspan="2"><bold>Sound signal</bold></td>
</tr>
<tr>
<td valign="top" align="left" colspan="2"><bold>Neural processing stage</bold></td>
</tr>
<tr>
<td valign="top" align="left" colspan="2"><bold>MNTB parameters</bold></td>
</tr>
<tr>
<td valign="top" align="left">Time constant, &#x003C4;<sub><italic>q</italic></sub></td>
<td valign="top" align="left">25&#x000B7;10<sup>&#x02212;6</sup> s</td>
</tr>
<tr>
<td valign="top" align="left">Leak constant, &#x003B1;<sub><italic>q</italic></sub></td>
<td valign="top" align="left">2</td>
</tr>
<tr>
<td valign="top" align="left">Multiplication factor, &#x003B2;<sub><italic>q</italic></sub></td>
<td valign="top" align="left">1 V</td>
</tr>
<tr>
<td valign="top" align="left">Multiplication factor, <italic>w</italic><sub><italic>mntb, exc</italic></sub></td>
<td valign="top" align="left">2 (Section 2.2.1), [200, 400, 750] (Section 2.2.2)</td>
</tr>
<tr>
<td valign="top" align="left" colspan="2"><bold>LSO parameters</bold></td>
</tr>
<tr>
<td valign="top" align="left">Time constant, &#x003C4;<sub><italic>r</italic></sub></td>
<td valign="top" align="left">25&#x000B7;10<sup>&#x02212;6</sup> s</td>
</tr>
<tr>
<td valign="top" align="left">Leak constant, &#x003B1;<sub><italic>r</italic></sub></td>
<td valign="top" align="left">1</td>
</tr>
<tr>
<td valign="top" align="left">Excitatory reversal potential, &#x003B2;<sub><italic>r</italic></sub></td>
<td valign="top" align="left">1 V</td>
</tr>
<tr>
<td valign="top" align="left">Multiplication factor, <italic>w</italic><sub><italic>lso, exc</italic></sub></td>
<td valign="top" align="left">1 (Section 2.2.1), 100 (Section 2.2.2)</td>
</tr>
<tr>
<td valign="top" align="left">Inhibitory reversal potential, &#x003B3;<sub><italic>r</italic></sub></td>
<td valign="top" align="left">3 V</td>
</tr>
<tr>
<td valign="top" align="left">Divisive inhibition constant, &#x003BA;<sub><italic>r</italic></sub></td>
<td valign="top" align="left">4</td>
</tr>
<tr>
<td valign="top" align="left">Sigmoid parameter, &#x003B1;<sub>&#x003C3;</sub></td>
<td valign="top" align="left">20 V<sup>-1</sup></td>
</tr>
<tr>
<td valign="top" align="left">Sigmoid parameter, &#x003B2;<sub>&#x003C3;</sub></td>
<td valign="top" align="left">0.2 V</td>
</tr>
<tr>
<td valign="top" align="left">Sound signal slope, &#x003B1;<sub><italic>l</italic></sub></td>
<td valign="top" align="left">0.5</td>
</tr>
<tr>
<td valign="top" align="left">Sound signal offset, &#x003B2;<sub><italic>l</italic></sub></td>
<td valign="top" align="left">0.5</td>
</tr>
<tr>
<td valign="top" align="left">Sound signal frequency, <italic>f</italic></td>
<td valign="top" align="left">1&#x000B7;10<sup>3</sup> Hz</td>
</tr></tbody>
</table>
</table-wrap>
<sec>
<label>2.2.1</label>
<title>Setup for ILD analysis without efferent feedback to study sensor influence on LSO neuron response</title>
<p>The first set of experiments investigates the impact of the sensor properties, particularly the feedback strength <italic>a</italic><sub><italic>f</italic></sub> and the deviation between two sensors, on subsequent processing in the system. To this end, we record the pre-amplified and high-pass filtered responses <italic>u</italic><sub><italic>ac</italic></sub>(<italic>t</italic>) of two different acoustic MEMS sensors to sounds generated by a Genelec 8010AP-6 loudspeaker. For measurement, a data acquisition unit with 100 kS/s sample rate was used, and sound signals were generated in Matlab and applied to the loudspeaker via a Babyface Pro FS. Sound signals were recorded simultaneously with a reference microphone beyerdynamic MM 1, to obtain the sound amplitudes of the microphone recordings.</p>
<p>Two sets of measurements were performed: first, the resonance frequency of each sensor was determined; second, the sound response for different feedback strengths and sound amplitudes was acquired. To determine the resonance frequency, the sensors were placed at a distance of 1 m from the loudspeaker, and a pure-tone frequency sweep was played. The resulting sensor response, pre-amplified by a factor of 1,000 and high-pass filtered with a cut-off frequency of 159 Hz, was converted into a frequency response by FFT, and the highest-amplitude peak was extracted as the resonance frequency. For the MEMS cantilevers used here, the extracted resonance frequencies are 950 Hz and 986 Hz. In the second measurement, each sensor was excited separately with a pure tone sound at its resonance frequency with different sound amplitudes. In addition to the sound amplitude, the feedback strength <italic>a</italic><sub><italic>f</italic></sub> was also varied, i.e., <italic>k</italic><sub><italic>a</italic></sub> &#x0003D; [0, 0.2, 0.4, 0.6, 0.8], while <italic>u</italic><sub><italic>dc</italic></sub> was kept constant at &#x02212;100 mV. Then, the maximal amplitude of the sensor signal was calculated and plotted against the RMS sound amplitude to obtain the transfer characteristics.</p>
<p>Finally, to study the influence of feedback strength and the differences in sensor properties on the ILD analysis, the sensor responses were used as input for simulations of the neuronal processing stage. In detail, Equations 1&#x02013;4,10,11 were neglected in the model, and the measured data were directly input as <italic>u</italic><sub><italic>ac</italic></sub>(<italic>t</italic>) for Equation 5. To check the influence of the different sensor characteristics and feedback, two cases were simulated: (i) The sensor response dataset is the same for the left- and right sensor in the simulations, using the measurements of sensor 1 with the resonance frequency of 950 Hz and (ii) the dataset used for the left sensor comes from sensor one (resonance frequency of 950 Hz) and the dataset used for the right sensor comes from sensor two (resonance frequency of 986 Hz). The first condition investigates how a real sensor would affect subsequent system behavior across different feedback levels, while the second condition investigates how the introduction of sensor discrepancies, e.g., from manufacturing tolerances, would further affect system behavior. The results are described in Section 3.1.</p>
</sec>
<sec>
<label>2.2.2</label>
<title>Setup for ILD analysis including efferent feedback from LSO neurons</title>
<p>In the previous section, acoustic MEMS sensors were measured and used as input to the simulation of envelope extraction and subsequent processing by the LSO-MNTB subsystem. To investigate an adaptive feedback for the sound source localization the outputs of the neural processing stage (&#x003C3;(<italic>r</italic>)<sub><italic>left, i</italic></sub>, &#x003C3;(<italic>r</italic>)<sub><italic>right, i</italic></sub>) need to feed back into either the ipsi- or the contralateral acoustic sensors (via <italic>FB</italic><sub><italic>left, i</italic></sub>, <italic>FB</italic><sub><italic>right, i</italic></sub>; see <xref ref-type="fig" rid="F3">Figure 3A</xref>). To study this, the complete system was simulated to test a broader range of configurations and observe their effects on system behavior. A synthetic sound signal was generated, fed into the sensor model as <italic>F</italic><sub><italic>ext</italic></sub>, and the sensor response was used as input to the MNTB and the LSO neurons, with an additional input to the LSO neuron based on the MNTB neuron output. The output of the LSO neuron is then used to change the feedback to the sensor, i.e., <italic>FB</italic>. To study the ILD dependence, different ILD levels <italic>ILD</italic> are applied, and the LSO response &#x003C3; to the different levels is the final measure. Thereby, the sound for the left and right sensor is synthetically generated for a fixed ILD level <italic>ILD</italic>, a fixed slope &#x003B1;<sub><italic>l</italic></sub>, and an offset &#x003B2;<sub><italic>l</italic></sub> according to</p>
<disp-formula id="EQ13"><mml:math id="M16"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>F</mml:mi></mml:mrow><mml:mrow><mml:mi>e</mml:mi><mml:mi>x</mml:mi><mml:mi>t</mml:mi><mml:mo>,</mml:mo><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mo>&#x000B1;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003B1;</mml:mi></mml:mrow><mml:mrow><mml:mi>I</mml:mi><mml:mi>L</mml:mi><mml:mi>D</mml:mi></mml:mrow></mml:msub><mml:mo>&#x000B7;</mml:mo><mml:mi>I</mml:mi><mml:mi>L</mml:mi><mml:mi>D</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003B2;</mml:mi></mml:mrow><mml:mrow><mml:mi>I</mml:mi><mml:mi>L</mml:mi><mml:mi>D</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x000B7;</mml:mo><mml:mo class="qopname">sin</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C9;</mml:mi><mml:mi>t</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo><mml:mi>j</mml:mi><mml:mo>&#x02208;</mml:mo><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mi>l</mml:mi><mml:mi>e</mml:mi><mml:mi>f</mml:mi><mml:mi>t</mml:mi><mml:mo>,</mml:mo><mml:mtext>&#x000A0;</mml:mtext><mml:mi>r</mml:mi><mml:mi>i</mml:mi><mml:mi>g</mml:mi><mml:mi>h</mml:mi><mml:mi>t</mml:mi></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(13)</label></disp-formula>
<p>This equation satisfies the constraint of a constant sum of left and right sound levels for different ILD levels, i.e., <italic>F</italic><sub><italic>ext, right</italic></sub>&#x0002B;<italic>F</italic><sub><italic>ext, left</italic></sub> &#x0003D; &#x003B1;<sub><italic>ILD</italic></sub><italic>ILD</italic>&#x0002B;&#x003B2;<sub><italic>ILD</italic></sub>&#x02212;&#x003B1;<sub><italic>ILD</italic></sub><italic>ILD</italic>&#x0002B;&#x003B2;<sub><italic>ILD</italic></sub> &#x0003D; <italic>const</italic>. This approach filters out cues related to the overall loudness and distance. Since in these simulations we used identical left and right sensors, the sound frequency is the same on both sides. Furthermore, there were no differences between the sides included in these studies, i.e., the simulation parameters were identical for both acoustic sensors, for both MNTB neurons, and for both LSO neurons. An overview of the parameters is presented in <xref ref-type="table" rid="T1">Table 1</xref>. These simulations were conducted for the four configurations of efferent feedback to study their influence on ILD extraction and the sensitivity of LSO neurons. Additionally, the potential impact of the excitatory-inhibitory balance between LSO and MNTB neurons on ILD extraction was studied. To this end, simulations with different values for the MNTB neuron scaling factor <italic>w</italic><sub><italic>mntb, exc</italic></sub> and the LSO neuron scaling factor <italic>w</italic><sub><italic>lso, exc</italic></sub> have been conducted for a broad set of input ILD ranges. The results are presented in Section 3.2.</p>
</sec>
<sec>
<label>2.2.3</label>
<title>Analysis</title>
<p>The architecture and model equations are explained in detail in the previous sections, together with the parameters in <xref ref-type="table" rid="T1">Table 1</xref> used to execute the simulations. To analyze the influence of sensor properties, feedback configuration, and excitatory-inhibitory balance, the output of the LSO neurons was studied in terms of steady-state behavior, temporal dynamics, and sensitivity to ILD for a simulation time of 1 s. The initial condition of the system state was determined by performing an initialization step. Therefore, the system was simulated for 2 s in an open-loop setting to determine the initial equilibrium point. Afterwards, the system&#x00027;s feedback configuration has been established, and an input stimulus has been provided.</p>
<p>The steady state response &#x003C3;<sub><italic>eq</italic></sub> was calculated as the average value of the last 0.333 s for simulations in Section 3.1 and 0.1 s for all the other simulations in Section 3.2 for both LSO neurons and feedback configurations, separately. To assess the combined effect of both steady state LSO outputs over the whole ILD range, the difference &#x00394;&#x003C3;<sub><italic>eq</italic></sub> &#x0003D; &#x003C3;<sub><italic>right</italic></sub>&#x02212;&#x003C3;<sub><italic>left</italic></sub> is calculated.</p>
<p>To study the temporal behavior of the system, the LSO response over time for a fixed set of parameters is analyzed in Section 3.2.2. Therefore, the time <italic>TT</italic><sub>90</sub> of the first occurrence of 90% of the LSO response maximum &#x003C3;(<italic>r</italic>(<italic>t</italic><sub><italic>max</italic></sub>)) is computed to inspect characteristics of the transient behavior. Additionally, the temporal evolution of the system is studied by extracting responses &#x003C3;(<italic>r</italic>(<italic>t</italic>)) for non-overlapping temporal slices <italic>t</italic>&#x02208;[<italic>t</italic><sub><italic>n</italic></sub>, <italic>t</italic><sub><italic>n</italic></sub>&#x0002B;&#x00394;<italic>t</italic>] of &#x00394;<italic>t</italic> &#x0003D; 0.1 s. The average activity per temporal slice is computed subsequently. Response curves for specific time slices across the temporal evolution are then obtained by computing average values for each ILD level and combining them into a curve.</p>
<p>The sensitivity of the LSO response is computed by numerical derivation of the steady-state response &#x003C3;<sub><italic>eq</italic></sub>(<italic>ILD</italic>) with respect to ILDs &#x003B4;&#x003C3;/&#x003B4;<italic>ILD</italic>, using a central finite differences scheme.</p>
</sec>
</sec>
</sec>
<sec sec-type="results" id="s3">
<label>3</label>
<title>Results</title>
<sec>
<label>3.1</label>
<title>Influence of sensor properties on neuronal processing of ILDs (without efferent feedback)</title>
<p>The first set of experiments investigated the feasibility of performing the ILD analysis using measurements from fabricated MEMS sensors together with the neural processing stage model. The response of the two sensors (resonance frequencies 950 Hz and 986 Hz), i.e., their transfer characteristics, is shown in <xref ref-type="fig" rid="F4">Figure 4</xref>. A largely linear relationship is observed between the reference microphone amplitude, as a measure of sound amplitude, and the envelope signal <italic>u</italic><sub><italic>env</italic></sub>(<italic>t</italic>) of the sensors. Thereby, increasing the feedback strength increases the sensor gain, as described earlier, and only linear characteristics are observed due to the restriction <italic>k</italic><sub><italic>a</italic></sub> &#x02264; 0.8. Furthermore, because of differences in sensor properties, the two sensors exhibit different response amplitudes for similar sound amplitudes. The highly linear relationship between the microphone signal and the stationary input provides a characteristic that can be easily leveraged by subsequent processing stages. Additionally, the monotonic relationship with constant feedback indicates that, in principle, there exists a clear control dimension for tuning the sensor&#x00027;s response curve. On the one hand, these circumstances indicate that the MEMS sensors can be used as meaningful transducers for the input sound signals. On the other hand, there are clear influences of differences in sensor properties (geometry, fabrication tolerances, etc.) on the sensor response. The effect size of these tolerances is of the same order as the sensor&#x00027;s operating range. Thus, one can compensate only for a certain amount of difference using feedback, limiting the number of sensors that can be used.</p>
<fig position="float" id="F4">
<label>Figure 4</label>
<caption><p>Measured MEMS sensor responses. Extracted envelope response of two MEMS sensors vs. microphone signal amplitudes for pure tone sound signals with each sensor excited at its resonance frequency (left sensor 950 Hz solid line and right sensor 986 Hz dashed line) and for different feedback strengths <italic>a</italic><sub><italic>f</italic></sub>/<italic>a</italic><sub><italic>crit</italic></sub> &#x0003D; <italic>k</italic><sub><italic>a</italic></sub> &#x0003D; 0, 0.2, 0.4, 0.6, 0.8.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1736957-g0004.tif">
<alt-text content-type="machine-generated">Graph showing the relationship between microphone signal in volts and $u_{env}$ in volts for different feedback strengths $k_a$ as fractions of a critical value $a_crit$ ($k_a = 0.0$ to $k_a = 0.8$). Curves indicate increasing $u_{env}$ with higher feedback strengths. Solid lines represent responses of the left sensor with $950Hz$ resonance frequency and dashed lines represent responses of the right sensor with $986 Hz$. The left sensor shows a higher gain across feedback strengths in comparison to the right sensor.</alt-text>
</graphic>
</fig>
<p>In a second step, the obtained envelope signals were used as input into the neural processing stage for ILD analysis. Here, either the measurements from one MEMS sensor were used to model both the left and right sensors in the model, or the measurements from both MEMS sensors were applied to stimulate the neural processing stage (<xref ref-type="fig" rid="F5">Figures 5A</xref>, <xref ref-type="fig" rid="F5">B</xref>, respectively). The system was run in an open-loop setting, i.e., without direct feedback from the neural processing stage to the acoustic sensing stage. Instead, constant feedback to the acoustic sensing stage was applied with different constant feedback factors <italic>a</italic><sub><italic>f, i</italic></sub>(<italic>t</italic>) &#x0003D; <italic>a</italic><sub><italic>f, i</italic></sub> &#x0003D; <italic>const</italic>. Thereby, the feedback strength for the left sensor was kept constant while that for the right sensor was varied to study its effect.</p>
<fig position="float" id="F5">
<label>Figure 5</label>
<caption><p>System ILD response characteristics. LSO response profiles across input ILDs for excitation-inhibition balance 1:2 for different feedback settings <italic>k</italic><sub><italic>a</italic></sub> &#x0003D; 0, 0.2, 0.4, 0.6, 0.8 and same acoustic MEMS sensor data for left (solid line) and right (dashed line) in <bold>(A)</bold> and for data from two different sensors in <bold>(B)</bold>. The point of equal activation (&#x003C3;<sub><italic>eq, left</italic></sub> &#x0003D; &#x003C3;<sub><italic>eq, right</italic></sub>) shifts depending on the feedback setting and the pairing of identical or different sensors (see Section 3.1 for details).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1736957-g0005.tif">
<alt-text content-type="machine-generated">The two graphs illustrate the left and right LSO responses $\sigma_{eq}$ as a function of interaural level differences (ILDs), ranging approximately from -3 to 2&#x000D7;10^{-4}. The feedback strength of the left sensor is kept constant ($k_{a,left}=0.4$), while the feedback strength of the right sensor is varied from (k_{a,right}=0.0 to k_{a,right}=0.8$). Graph A shows the responses obtained using identical measurement data for both sensors, whereas Graph B presents the responses for two different sensor measurement datasets. The curves demonstrate that increasing the feedback strength of the right sensor shifts the intersection point of the two neuronal responses toward smaller ILDs.</alt-text>
</graphic>
</fig>
<p>From <xref ref-type="fig" rid="F5">Figure 5</xref>, it can be observed that the response curves of the left and right LSO neurons resemble the typical sigmoidal tuning curve expected from their biological counterpart (cf. <xref ref-type="fig" rid="F1">Figure 1</xref>). This observation holds independent of the use of measurements from only one MEMS sensor (<xref ref-type="fig" rid="F5">Figure 5A</xref>) or two MEMS sensors with fabrication-based differences (<xref ref-type="fig" rid="F5">Figure 5B</xref>) and independent of the utilized constant feedback factor <italic>a</italic><sub><italic>f, i</italic></sub>. However, only in the case of input from one MEMS sensor, the LSO responses are symmetric for the left and right neurons. Furthermore, the influence of the feedback constants is common in both simulations with either one or two MEMS sensors. The response curves shift depending on the left-right balance of the constant feedback factors <italic>a</italic><sub><italic>f</italic></sub>. In particular, the point of equal amplitude between the left and right LSO, i.e., the intersection point between both curves, is shifted further to more negative ILD values and larger LSO activities &#x003C3; for larger <italic>a</italic><sub><italic>f</italic></sub> constants on the right acoustic sensing stage. Similarly, the intersection points are moved further toward negative ILD values and smaller &#x003C3; values for larger <italic>a</italic><sub><italic>f</italic></sub> constants on the left acoustic sensing stage. In this way, the left-right balance of feedback strengths can be used to tune the sensitivity more toward one side or the other.</p>
<p>Notably, for the simulated case of identical MEMS sensors and equal constant feedback (one MEMS sensor measurement used as left and right input; <xref ref-type="fig" rid="F5">Figure 5A</xref>, <italic>a</italic><sub><italic>f, left</italic></sub> &#x0003D; <italic>a</italic><sub><italic>f, right</italic></sub> &#x0003D; 0.4), the point of equal amplitude perfectly matches an ILD value of zero. Thus, for a balanced setup with equal feedback strength on the left and right sides, the system would produce a balanced output. Instead, for the case of two differing MEMS sensors and equal constant feedback (one MEMS sensor measurement per input side; <xref ref-type="fig" rid="F5">Figure 5B</xref>, <italic>a</italic><sub><italic>f, left</italic></sub> &#x0003D; <italic>a</italic><sub><italic>f, right</italic></sub> &#x0003D; 0.4) the point of equal amplitude is off center (here, shifted towards the left, <italic>ILD</italic> &#x0003C; 0). Therefore, the system would produce an imbalanced output due to the sensor differences, e.g., due to fabrication. Importantly, though, a balanced system output can be re-established by counterbalancing via different feedback strengths (stronger constant feedback to the right than to the left; <xref ref-type="fig" rid="F5">Figure 5B</xref>, <italic>a</italic><sub><italic>f, left</italic></sub> &#x0003D; 0.4, <italic>a</italic><sub><italic>f, right</italic></sub> &#x0003D; 0.8). This highlights how specifying, or learning, specific feedback strengths per sensor could be used to calibrate the overall system in light of fabrication-based differences of the MEMS sensor to balance the input to the neuronal stage. Given the results, this approach would be possible, since the order of magnitude at which feedback strength <italic>a</italic><sub><italic>f</italic></sub> can alter the response profile is the same as the effect size of the sensor differences.</p>
<p>However, this local feedback <italic>a</italic><sub><italic>f</italic></sub> is assumed to adapt to signal statistics, such as reducing the feedback for large sound amplitudes for protection reasons or reducing the response to constant sounds to highlight important features or reduce redundant information (sensory adaptation). In such cases, one possibility to still balance the input to the neuronal processing stage is efferent feedback from the neuronal processing stage, thereby dynamically adjusting the feedback strengths <italic>a</italic><sub><italic>f</italic></sub> to tune the sensor response profiles to match each other. Another benefit of dynamically tuning the sensor&#x00027;s response profiles via efferent feedback arises when sensor response profiles degrade, e.g., due to aging or temperature effects. In this case, dynamic adaptation could keep the response profiles matched, achieving system robustness. Possible system behaviors resulting from efferent feedback for the task of ILD analysis are discussed in the following section.</p>
</sec>
<sec>
<label>3.2</label>
<title>Influence of efferent feedback on system response</title>
<p>The second set of experiments investigates the possible system behavior if an efferent feedback is introduced from the neuronal processing stage to the sensor stage (see <xref ref-type="fig" rid="F2">Figures 2</xref>, <xref ref-type="fig" rid="F3">3</xref>). The focus here is on the range of system responses, its temporal response, stability, and ILD sensitivity and tuning.</p>
<sec>
<label>3.2.1</label>
<title>Configuration-dependent LSO neuron response characteristics</title>
<p>To study the influence of the efferent feedback onto the system characteristics, the LSO response at equilibrium was extracted across the range of tested ILD values (<italic>ILD</italic> &#x0003D; [&#x02212;1, 1]), as shown in <xref ref-type="fig" rid="F6">Figures 6A</xref>&#x02013;<xref ref-type="fig" rid="F6">C</xref> using different excitation-inhibition balances &#x003C1;<sub><italic>w</italic></sub> &#x0003D; <italic>w</italic><sub><italic>LSO</italic></sub>/<italic>w</italic><sub><italic>MNTB</italic></sub> &#x0003D; 1:2 (A), 1:4 (B), and 1:7.5 (C) (cf. Section 2.2.2 for details). Here, only the response of the right LSO neuron is shown, since both curves (left and right LSO neuron responses) are symmetric. Subsequent processing for sound source localization based on the ILD could use either the response from one side or a combination of the responses from both sides. Thus, to give an idea of the input for the latter case, the difference between left and right response &#x003C3;<sub><italic>right</italic></sub>&#x02212;&#x003C3;<sub><italic>left</italic></sub> is shown in the right column of <xref ref-type="fig" rid="F6">Figures 6D</xref>&#x02013;<xref ref-type="fig" rid="F6">F</xref>. In all graphs, the black curve represents the case without efferent feedback, whereas the colored lines represent the four different feedback configurations: positive ipsilateral (red), positive contralateral (purple), negative ipsilateral (orange), and negative contralateral (green).</p>
<fig position="float" id="F6">
<label>Figure 6</label>
<caption><p>System ILD response characteristics at equilibrium. Response profiles of right LSO neuron across input ILDs for different excitation-inhibition balance levels &#x003C1;<sub><italic>w</italic></sub> [left column, <bold>(A&#x02013;C)</bold>] and respective right-left difference profiles [right column, <bold>(D&#x02013;F)</bold>]. Five different feedback configurations are depicted: without feedback from the LSO neuron (No FB, black; constant <italic>a</italic><sub><italic>f</italic></sub>), positive ipsilateral (Ipsi Pos, red; <italic>k</italic><sub><italic>inv</italic></sub> &#x0003D; &#x0002B;1, <italic>i</italic> &#x0003D; <italic>j</italic>), negative ipsilateral (Ipsi Neg, orange; <italic>k</italic><sub><italic>inv</italic></sub> &#x0003D; &#x02212;1, <italic>i</italic> &#x0003D; <italic>j</italic>), positive contralateral (Contra Pos, purple; <italic>k</italic><sub><italic>inv</italic></sub> &#x0003D; &#x0002B;1, <italic>i</italic> &#x02260; <italic>j</italic>), and negative contralateral (Contra Neg, green; <italic>k</italic><sub><italic>inv</italic></sub> &#x0003D; &#x02212;1, <italic>i</italic> &#x02260; <italic>j</italic>). In all cases, the steady-state response &#x003C3;<sub><italic>eq</italic></sub> is shown.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1736957-g0006.tif">
<alt-text content-type="machine-generated">Six-panel figure (A&#x02013;F) showing interaural level difference (ILD) response curves at equilibrium. Panels A&#x02013;C plot the right LSO neuron equilibrium response, $sigma_{eq}$ (a.u.), versus ILD (a.u.), and panels D&#x02013;F plot the difference between right and left LSO neuron responses, delta $sigma_{eq}$ (a.u.), versus ILD. Each panel corresponds to a different rho_w value (1/2, 1/4, and 1/7.5). Five curves are shown in each panel representing different feedback configurations: no feedback, ipsilateral positive, ipsilateral negative, contralateral positive, and contralateral negative. As rho_w decreases, $sigma_{eq}$ curves shift toward higher ILDs, while delta $sigma_{eq}$ curves show little change across feedback conditions or rho_w values.</alt-text>
</graphic>
</fig>
<p>For all curves, a monotonic increase in the LSO neurons&#x00027; response amplitude with increasing ILD can be observed (<xref ref-type="fig" rid="F6">Figure 6</xref>, right column). Changing the ratio between inhibitory input (<italic>w</italic><sub><italic>MNTB</italic></sub>) and excitatory input (<italic>w</italic><sub><italic>LSO</italic></sub>) for the LSO neuron shifts its sensitive range. Here, with an increasingly stronger inhibitory influence, the location of the slope shifts from the contralateral towards the ipsilateral side (from <xref ref-type="fig" rid="F6">Figures 6A</xref>&#x02013;<xref ref-type="fig" rid="F6">C</xref>). If no efferent feedback is applied, the overall shape of the response curve (&#x003C3; vs. ILD) is thereby preserved. Beyond these common properties of monotonic increase and the shift in sensitivity due to the excitation-inhibition balance, the different feedback configurations have distinct effects, mainly altering the shape of the response curves and their relative positions compared to the case without feedback. Two configurations increase sensitivity while decreasing the ILD range within which the LSO neuron is sensitive, compared to the case without efferent feedback. This can be attributed to either a direct effect increasing the excitatory input to the LSO neuron by the amplifying feedback to the sensor on the ipsilateral side (ipsi positive feedback) or an indirect effect via damping the sensor response on the contralateral side due to the negative feedback and thus decreasing the inhibitory influence (contra negative feedback). Conversely, the two other configurations lead to a shallower slope with a broader sensitive range - either directly via the damping (negative) feedback to the ipsilateral side, reducing the excitatory influence (ipsi negative feedback), or indirectly via the positive (amplifying) feedback to the contralateral side, increasing the inhibitory input to the LSO neuron (contra positive feedback).</p>
<p>Furthermore, an interaction exists between the excitation-inhibition balance and the effect of feedback configuration. The configurations with the lower sensitivities (ipsi negative and contra positive) are affected the most, i.e., they exhibit the strongest shift of their response curves to the contralateral side for &#x003C1;<sub><italic>w</italic></sub> &#x0003D; 1/2 and the ipsilateral side for &#x003C1;<sub><italic>w</italic></sub> &#x0003D; 1/7.5. The opposite pattern is visible for the feedback configurations, yielding an increase in sensitivity. Their sensitive response regions move less strongly away from the center when the excitation-inhibition ratios change. In this context, the lateralization, i.e., the strength of this shift, is most pronounced for the feedback configuration with positive feedback to the contralateral side (purple line). These results demonstrate that it is possible to tune both the sensitivity and the lateralization of LSO neurons using efferent feedback at the sensing stage.</p>
<p>Computing the difference between the left and right LSO output largely compensates for lateral displacements of the sensitive regions independent of the feedback configuration and excitation-inhibition balance, but preserves the different effects on the slope of the response curve (<xref ref-type="fig" rid="F6">Figures 6D</xref>&#x02013;<xref ref-type="fig" rid="F6">F</xref>). Due to these slope changes, the feedback configuration can result in either more analog ILD coding (purple and orange curves) or a more digital response with two or three saturation regimes for configurations with increased sensitivity (red and green curves). The latter could be used to estimate the direction of the sound source, enabling a fast reaction, such as turning the head towards it. Conversely, analog coding could support sound-stream segregation if localization and sound analysis are combined to enable, for example, listening to a specific sound source while remaining aware of the overall environment.</p>
<p>In sum, the impact of feedback configuration and excitation-inhibition balances on the system&#x00027;s processing can be leveraged for technical applications. With these design dimensions, it is possible to largely shift and tune the system&#x00027;s processing to better reflect the concrete circumstances of a current context. Additionally, despite the combination of two nonlinear dynamical systems, a stable response is obtained. Nevertheless, the temporal behavior exhibits interesting properties, which are discussed in the next section.</p>
</sec>
<sec>
<label>3.2.2</label>
<title>Temporal response behavior</title>
<p>The above results revealed how the system&#x00027;s behavior depends on the feedback configuration and the excitation-inhibition balance for constant input sound levels, and how to analyze the stable (converged equilibrium) responses. Beyond these response characteristics, the system exhibits rich temporal dynamics. Depending on the specific ILD, the feedback configuration and excitation-inhibition balance, a transient response phase is visible in the LSO response profile (cf. <xref ref-type="fig" rid="F7">Figures 7C</xref>, <xref ref-type="fig" rid="F7">D</xref> for exemplary behaviors). For example, in <xref ref-type="fig" rid="F7">Figure 7C</xref>, the negative feedback onto the ipsilateral sensor yields a damping effect and a fast saturation of the LSO response to the equilibrium value, after a short transient phase. For this case, the overall LSO neuron response &#x003C3;(<italic>ILD</italic>) does not change over time, as demonstrated by the responses obtained after different time intervals in <xref ref-type="fig" rid="F7">Figure 7A</xref>. A different pattern can be observed for a configuration with positive feedback to the contralateral side, which increases the sensor response and thus the inhibitory input to the LSO neuron (<xref ref-type="fig" rid="F7">Figures 7B</xref>, <xref ref-type="fig" rid="F7">D</xref>). In this case, the LSO neuron response oscillates around the equilibrium value before settling at it. The transient time to reach the equilibrium value is much longer than in the previously discussed case. These effects are differently pronounced for different ILD values (<xref ref-type="fig" rid="F7">Figure 7B</xref>). Overall, these examples provide hints on how the system&#x00027;s temporal response characteristics are influenced by the discussed design choices. 
Viewing the system as an oscillator, it either becomes more damped (<xref ref-type="fig" rid="F7">Figures 7A</xref>, <xref ref-type="fig" rid="F7">C</xref>) or less damped (more oscillatory) (<xref ref-type="fig" rid="F7">Figures 7B</xref>, <xref ref-type="fig" rid="F7">D</xref>) depending on the feedback configuration and parametrization. Such a tendency towards more or less volatility could be exploited in technical setups, making the system more robust to fluctuations or more sensitive to them, depending on application demands. Furthermore, the oscillatory response enables both fast and slow responses to sound localization, as described above. The first maximum of &#x003C3; provides a fast but coarse estimate of the sound source, which could be used for rapid danger detection and rapid movement towards or away from the sound source. Furthermore, it would initially create a stronger differentiation between localizations of different sound sources. The equilibrium value, on the other hand, provides a slower but more accurate estimate of the ILD, which might be more important for understanding and segregating sounds in noisy environments.</p>
<fig position="float" id="F7">
<label>Figure 7</label>
<caption><p>LSO temporal response characteristics. LSO response profiles across input ILDs for different temporal periods (color coded) after stimulus onsets for the case of negative ipsilateral feedback <bold>(A)</bold> and positive contralateral feedback <bold>(B)</bold> for an excitation-inhibition balance factor of &#x003C1;<sub><italic>w</italic></sub> &#x0003D; 1/2. <bold>(C, D)</bold> Exemplary temporal LSO response profiles for the respective cases. Gray response profiles depict the contralateral LSO response for the respective case (not shown in <bold>(A)</bold> and <bold>(B)</bold>). Insets depict details from the main plots. Colors for the type of feedback configuration match the ones from <xref ref-type="fig" rid="F6">Figure 6</xref>.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1736957-g0007.tif">
<alt-text content-type="machine-generated">Four-panel figure labeled A&#x02013;D. Panels A and B show temporal LSO responses, $sigma$ (a.u.), plotted versus interaural level difference (ILD (a.u.)). Panel A depicts the negative ipsilateral feedback condition, and panel B shows the positive contralateral feedback condition. In both panels, $sigma$ increases from approximately 0 to 1 as ILD increases. Panels C and D are insets showing $sigma$ over a one-second time window at ILD = -0.6. In panel C (negative ipsilateral feedback), $sigma$ remains nearly constant, while in panel D (positive contralateral feedback), $sigma$ exhibits noticeable fluctuations.</alt-text>
</graphic>
</fig>
<p>Additionally, a close relationship exists between the time <italic>TT</italic><sub>90</sub> it takes the system to reach its transient maximum (i.e., 90% of &#x003C3;<sub><italic>max</italic></sub>), the sensitivity (i.e., the slope) of the system for a given ILD value, and its lateralization (<xref ref-type="fig" rid="F8">Figure 8</xref>). ILD ranges in which the system is operated in a non-saturating regime (slope different from zero; <xref ref-type="fig" rid="F8">Figure 8</xref>) exhibit a longer <italic>TT</italic><sub>90</sub> than the saturated ones. Interestingly, though, <italic>TT</italic><sub>90</sub> profiles deviate from the baseline configuration without feedback (no FB) only if the &#x003C3;<sub><italic>eq</italic></sub> of the respective feedback configuration is larger than that of the case without feedback. This typically occurs on the more contralateral side of the LSO response curve in the no-feedback condition. For which feedback configurations and which ILDs this holds depends strongly on the excitation-inhibition balance &#x003C1;<sub><italic>w</italic></sub>. The feedback configurations with a smaller &#x003C3;<sub><italic>eq</italic></sub> than the no-feedback case show only minor deviations from the respective baseline profiles and lead to overall shorter <italic>TT</italic><sub>90</sub> values.</p>
<fig position="float" id="F8">
<label>Figure 8</label>
<caption><p>Sensitivity and timing of LSO response characteristics for different ILDs. Extracted gradient of ILD system responses for different feedback configurations and an excitation-inhibition balance level of &#x003C1;<sub><italic>w</italic></sub> &#x0003D; 1/2 in <bold>(A)</bold> and &#x003C1;<sub><italic>w</italic></sub> &#x0003D; 1/7.5 in <bold>(B)</bold>. In <bold>(C)</bold> and <bold>(D)</bold>, the time to reach 90% of maximum LSO amplitude is shown for different ILDs and the same excitation-inhibition balance levels as in <bold>(A)</bold> and <bold>(B)</bold>.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-20-1736957-g0008.tif">
<alt-text content-type="machine-generated">Four-panel figure labeled A&#x02013;D showing the effects of different feedback configurations&#x02014;no feedback, ipsilateral positive, ipsilateral negative, contralateral positive, and contralateral negative&#x02014;on the derivative of the right LSO neuron response with respect to ILD, $\delta\sigma / \delta ILD$. Panels A and B plot $\delta\sigma / \delta ILD$ versus ILD for $rho_w=1/2$ and $rho_w=1/7.5$, respectively. The response curves in panels A and B are similar in shape but mirrored around ILD = 0 across all feedback configurations. Panels C and D show the time required to reach 90% of the maximum LSO response as a function of ILD for the same two rho_w values. For $rho_w=1/2$, positive contralateral and negative ipsilateral feedback need more time to reach 90% of the maximum amplitude, whereas for $rho_w=1/7.5$, negative contralateral and positive ipsilateral feedback need more time to reach 90% of the maximum amplitude.</alt-text>
</graphic>
</fig>
<p>Such a behavior could serve different purposes&#x02014;either as a <italic>confidence indicator</italic>, or as an <italic>additional coding dimension</italic>. In terms of a <italic>confidence indicator</italic>, longer detection times (interval until transient maximum is reached) could signal subsequent processing units that the sensitive range of the LSO neuron shifts towards the more contralateral side. Thus, the detection might be less confident, and potentially more weight should be given to the opposite LSO neuron. In this case, the <italic>TT</italic><sub>90</sub> could be used as a fast indicator during the transient phase of the response to determine whether the input&#x00027;s ILD range likely falls into the range for which the specific LSO neuron is sensitive or not. This indication could be used to adaptively re-tune the LSO neuron with the factors identified above (i.e., feedback configuration and excitation-inhibition balance) to adjust the sensitivity range to the currently relevant ILD range. In terms of an <italic>additional coding dimension</italic>, the response time <italic>TT</italic><sub>90</sub> itself can be used as another encoding of the ILD alongside the amplitude response code for the more contralateral sounds. Particularly for the cases with rather small sensitivities <italic>d&#x003C3;</italic>/<italic>dILD</italic>, using the <italic>TT</italic><sub>90</sub> value could improve the resolution in ILD detection.</p>
</sec>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<label>4</label>
<title>Discussion</title>
<p>This study presents a novel approach to neuromorphic auditory processing by integrating adaptive bio-inspired acoustic sensors, as a model of the cochlea, with an efferent feedback mechanism, driven by the neural processing stage. The latter is inspired by superior olive processing for sound localization based on ILD, realized by LSO and MNTB neurons. The bio-inspired acoustic sensor models the processing and sensing of the cochlea, particularly frequency decomposition and (nonlinear) amplification of the input. The neural feedback dynamically adjusts the sensing properties, such as gain, bandwidth, and linearity, by changing the strength of the local feedback to the sensor. This local feedback models the impact of outer hair cells in the cochlea and the sensitivity tuning of the acoustic neuron fibers at the synapse with the inner hair cell. As demonstrated by measurements from two different sensors, adjusting this local feedback to the sensors changes ILD sensitivity only slightly but can help overcome differences in the sensing stage between the left and right sides, which might arise from fabrication tolerances, aging, and environmental influences. However, since efferent feedback can also be driven by sources outside the LSO, this would influence LSO-based ILD detection by shifting the balance between left and right inputs. Efficiently processing multiple efferent feedback sources remains an open problem, necessitating the development of appropriate mechanisms to overcome current limitations.</p>
<p>The MEMS cochlea and the utilization of its feedback component, together with the dynamical multi-stage cross-hemispherical processing, are key components of the proposed system architecture and set it apart from many previous studies. While other impressive approaches to neuromorphic sound source localization exist, many utilize artificial preprocessing of sound source data (<xref ref-type="bibr" rid="B14">Glackin, 2010</xref>; <xref ref-type="bibr" rid="B55">Wang et al., 2018</xref>; <xref ref-type="bibr" rid="B61">Zhang et al., 2024</xref>; <xref ref-type="bibr" rid="B56">Ware et al., 2025</xref>), ultrasonic transducers (<xref ref-type="bibr" rid="B35">Moro et al., 2022</xref>), silicone cochlea chips (<xref ref-type="bibr" rid="B5">Chan et al., 2010</xref>), and FPGA cochleas (<xref ref-type="bibr" rid="B58">Xu et al., 2018a</xref>; <xref ref-type="bibr" rid="B45">Schoepe et al., 2023</xref>) instead of the MEMS cochlea with tunable sensors. Notably, these other approaches typically use inter-aural timing difference (ITD) as the coding paradigm to determine correlates of sound source location, and subsequently pair it with one or multiple stages of feedforward processing. The proposed mechanism here chooses inter-aural level difference (ILD) as the main coding paradigm. Furthermore, it offers interesting avenues for time-domain encoding that depend on the sensitivity range of the specific feedback configuration. Specifically, these feedback projections from the neural processing stage back to the cochlear sensor, with their possibility for different configuration types, are a distinctive element of the proposed model and are not found in any of the other approaches mentioned above&#x02014;an element from which additional power and complexity for the computation of sound information can be drawn.</p>
<p>Inspired by the variety of connections in the human efferent systems, we studied four different feedback connections as combinations of whether the feedback projects to the ipsi- or the contralateral cochlea sensor and whether it affects the sensor&#x00027;s sensitivity positively or negatively. These feedback configurations correspond to a certain extent to established efferent pathways of the Superior Olivary Complex. Namely, ipsilateral and contralateral feedback reflect the influence of medial olivocochlear fibers, which modulate outer hair cells and dynamically adjust cochlear gain on the same or opposite side, thus enhancing interaural contrast (<xref ref-type="bibr" rid="B31">Lopez-Poveda, 2018</xref>; <xref ref-type="bibr" rid="B16">Guinan, 2018</xref>). Thereby, an inhibiting effect is observed for sounds in quiet environments, similar to negative feedback in our model. In noisy environments, an enhancement of cochlear response for target sounds can be observed (<xref ref-type="bibr" rid="B31">Lopez-Poveda, 2018</xref>), which corresponds to positive feedback in our sensor system in the model. Similarly, lateral olivocochlear efferent connections influence type-I auditory nerve fibers ipsilaterally and contralaterally, thereby enhancing or reducing afferent sensitivity rather than mechanical gain. However, the lateral olivocochlear efferents are less clearly resolved due to the difficulty in stimulating the unmyelinated fibers (<xref ref-type="bibr" rid="B31">Lopez-Poveda, 2018</xref>). Since the feedback to the MEMS cochlea sensor modifies the overall sensor gain, i.e., the overall artificial cochlear response, both effects (outer hair cell modulation and nerve fiber sensitivity change) can be modeled by modulating the feedback gain. 
Thus, all four modeled configurations map onto plausible Superior Olivary Complex connections, since the biological efferent system contains both ipsi- and contralateral projections, and both gain-enhancing and gain-reducing effects. This explicit alignment strengthens the relevance of our feedback configurations as simplified but biologically grounded abstractions of cochlear efferent control. The influence of the different efferent feedback connections was studied for different excitation-inhibition balances, i.e., input gains for MNTB and LSO neurons&#x00027; excitatory input. All configurations yield stable responses, and the monotonic increase in response amplitude with increasing ILD toward the respective LSO neuron&#x00027;s ipsilateral side aligns well with the general working principle of the biological system. This shows that implementing efferent feedback into a technical system with MEMS-based acoustic sensing units and biologically inspired ILD processing can be feasible and successful in general.</p>
<p>The choice of feedback configuration and excitation-inhibition balance poses itself as a critical design dimension of the system. Changing the feedback configuration tunes the sensitivity of the LSO response, and changing the excitation-inhibition balance shifts the sensitive ILD range. The sensitivity of the steady state responses for the right LSO neuron can be sorted from low to high based on the feedback configuration: (i) positive contralateral, (ii) negative ipsilateral, (iii) no feedback, and (iv) negative contralateral, which is similar to the positive ipsilateral case. In the latter two cases, due to the more complex interplay between the sensor and neural processing stages, the response curve differs from the sigmoidal shape typically observed in biological systems, and in the absence of efferent feedback. However, this change in shape can be used to encode ILDs differently. In the case of strongly increased sensitivity, a binary response of the LSO neuron is obtained, whereas a reduction in sensitivity in the other two feedback configurations allows for a more analog encoding of ILDs. Depending on the application, one can thus switch between analog and binary encoding of the ILDs.</p>
<p>Regarding the shift in sensitive ILD range, an increase in the inhibition from the contralateral acoustic sensor, i.e., decreasing the excitation-inhibition balance, leads to a shift of the sensitive ILD range to more positive ILDs for the right LSO neuron. Thus, the difference between the two LSO neuron responses remains centered around <italic>ILD</italic> &#x0003D; 0 because different excitatory or inhibitory parameters influence the left and right LSO neuron responses equally, owing to the symmetric architecture. Except for the case of &#x003C1;<sub><italic>w</italic></sub> &#x0003D; 1/4, a range with almost no sensitivity to ILD changes occurs around <italic>ILD</italic> &#x0003D; 0. In fact, the difference in LSO neuron response appears quite similar for &#x003C1;<sub><italic>w</italic></sub> &#x0003D; 1/2 and &#x003C1;<sub><italic>w</italic></sub> &#x0003D; 1/7.5, although the single LSO neuron response shifts strongly. Thus, depending on the processing of the subsequent stages, reading out one LSO neuron might be sufficient (case &#x003C1;<sub><italic>w</italic></sub> &#x0003D; 1/4) or, if the difference of LSO neurons is used, it is possible to obtain a stable response despite shifting the sensitive range of neurons. In the latter case, the sound source localization from the difference of LSO neuron responses refers to a relative position, whereas the single LSO neuron response could provide an absolute position. The difference computation between the left and right LSO outputs serves as a simple linear example for further processing of the system output. It outlines the possibility of recombining different elements of the representation to retain some aspects of the coding principle, e.g., differences in sensitivity (slope) between feedback configurations, while discarding others, e.g., the lateral shift of the response curves. 
In the simple example presented here, this study performed particularly well because the left and right acoustic sensing and neural processing stages exhibited symmetric response behavior under identical parametrization. Furthermore, the choice of feedback configuration switches the combined response between a more analog and a more digital representation, which would be advantageous for different applications. In this sense, tuning to specific ILD ranges and sensitivities enables dynamic, adaptive sound source localization.</p>
<p>While all feedback configurations lead to stable LSO neuron responses, the temporal responses yield further information about the ILDs and feedback configurations. Here, feedback configurations that resulted in stronger responses than in the case without feedback further yielded oscillatory responses about the steady-state value. This was particularly pronounced for ILDs on the more contralateral side. The first peak of these responses (before the steady-state) is reached much later than the steady-state response in the other cases. This provides an additional temporal coding of ILD and signals to subsequent stages, if feedback were applied to shift the sensitive range toward more contralateral ILDs.</p>
<p>While stable responses were observed in this study, systems without limitations on feedback strength, fast varying input signals, or different parameter settings might yield unstable configurations. Thus, examining the system from a control-theoretic perspective could strengthen the analysis of the observed effects. In particular, linear stability analysis around fixed points, bifurcation analysis, and small-signal gain analysis, or frequency-response characterization, could be applied (<xref ref-type="bibr" rid="B23">Khalil, 2014</xref>), which is useful for uncovering computational properties of recurrent neural networks in general (<xref ref-type="bibr" rid="B48">Sussillo and Barak, 2013</xref>). Additionally, as a more recent technique, contraction analysis can be used to investigate exponential incremental convergence of state-space trajectories to one another without explicitly considering a fixed point (<xref ref-type="bibr" rid="B30">Lohmiller and Slotine, 1998</xref>); see (<xref ref-type="bibr" rid="B51">Tsukamoto et al., 2021</xref>) for a recent tutorial. In individual sub-systems, related investigations were already fruitful. For example, <xref ref-type="bibr" rid="B4">Brosch and Neumann (2014)</xref> studied the mathematical and stability properties of an excitatory-inhibitory (E-I) neuron pair to show how it behaves under different input and parameter combinations. This E-I pair is structurally conserved by the LSO-MNTB circuit and thus could serve as a meaningful basis for sparking further stability analyses in the larger Cantilever-LSO-MNTB system. Further foundational understanding of the system&#x00027;s closed-loop characteristics will help with broader application of the idea. For example, in the neuromorphic setting, this analysis could prove useful for understanding how small differences (e.g., from quantization artifacts during conversion to the chip, or from drift on analog chips) affect the system&#x00027;s functioning.</p>
<p>In principle, the joint system of biologically inspired acoustic sensors and neurons of the auditory pathway can be implemented in neuromorphic hardware. Combining sensor measurements with the neuron models provided a first proof-of-principle. The neuronal stage, i.e., LSO and MNTB neurons, had previously been implemented on neuromorphic hardware, specifically TrueNorth and Spinnaker (<xref ref-type="bibr" rid="B44">Schmid et al., 2023</xref>). In the neuromorphic implementation, the adaptation of the feedback can be used to overcome differences in the sensors due to, e.g., aging or fabrication tolerances. Adding efferent feedback enables tuning sensitive ranges and sensitivity and, thus, the implementation of adaptive sound source localization. The dynamic adaptation of the sound source localization can be used in a closed-loop scheme to minimize localization error. Expected advantages include a fast, noise-robust sound source localization system that can easily handle dynamic and moving sound sources. If combined with a motor control, closed-loop systems for navigation based on sounds could be implemented for real-time and efficient operation, e.g., in robot systems. Furthermore, modulatory control of neuronal responses, e.g., due to GABA receptor adaptation, could be added to enhance the adaptability and tuning of sound source localization and incorporate adaptation due to previous stimuli (<xref ref-type="bibr" rid="B37">Oess et al., 2020a</xref>). Additionally, learning capability for the excitation-inhibition balance as well as the feedback parameters could be introduced to improve the left-right balance of the system and address challenges due to other sources of efferent feedback, which yield a different tuning of sensors as required for sound source localization.</p>
<p>The results were obtained for two acoustic sensors, which limits the localization task to sounds near their resonance frequencies. To expand the implementation for complex sounds, the system can be easily scaled up using multiple acoustic sensors with different resonance frequencies, together with multiple neuronal processing stages. Owing to the band-pass filter characteristics of the sensors, noise sources with different spectral components are effectively filtered, leading to a noise-robust system. The effect of the feedback varies under different environmental noise conditions. Positive feedback to the sensor will decrease its bandwidth and increase the filtering properties. Negative feedback decreases the sensor response when the overall noise level is high, preventing saturation of the sensor and subsequent stages. Thus, the feedback strength could be dynamically adjusted to dampen or strengthen the sensor response for each side depending on the respective noise level. Asymmetric feedback could further increase this effect. Noises influencing the same spectral components can be reduced by decomposing the sound into streams and selectively adjusting the sensitivity to a specific stream by adjusting the feedback settings to dampen or amplify the sensor responses.</p>
<p>The findings highlight the potential of bio-inspired feedback control for next-generation auditory sensors, with promising implications for hearing augmentation and machine listening applications. Potential applications of this technology include advanced hearing aids, robotic auditory systems, and enhanced speech recognition devices. By leveraging the principles of biological adaptation, our approach opens new possibilities for developing intelligent auditory processing systems that can operate effectively in real-world scenarios.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s5">
<title>Data availability statement</title>
<p>The custom-developed MATLAB simulation code is available in the repository <ext-link ext-link-type="uri" xlink:href="https://github.com/Victory6921/cantilever-sensors-w-neuronal-processing-stage">https://github.com/Victory6921/cantilever-sensors-w-neuronal-processing-stage</ext-link> and the data are publicly available at the following link: <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.5281/zenodo.18244293">https://doi.org/10.5281/zenodo.18244293</ext-link>.</p>
</sec>
<sec sec-type="author-contributions" id="s6">
<title>Author contributions</title>
<p>SD: Visualization, Formal analysis, Validation, Data curation, Methodology, Writing &#x02013; review &#x00026; editing, Software, Writing &#x02013; original draft, Investigation, Conceptualization. DS: Visualization, Data curation, Methodology, Validation, Conceptualization, Software, Writing &#x02013; original draft, Formal analysis, Investigation, Writing &#x02013; review &#x00026; editing. TO: Writing &#x02013; review &#x00026; editing, Methodology, Writing &#x02013; original draft, Supervision, Conceptualization, Visualization, Validation. HG: Investigation, Writing &#x02013; review &#x00026; editing. HN: Writing &#x02013; original draft, Supervision, Writing &#x02013; review &#x00026; editing. ME: Writing &#x02013; review &#x00026; editing, Supervision. CL: Conceptualization, Funding acquisition, Methodology, Project administration, Resources, Supervision, Validation, Visualization, Writing &#x02013; original draft, Writing &#x02013; review &#x00026; editing.</p>
</sec>
<ack><title>Acknowledgments</title><p>The authors acknowledge support by the state of Baden-W&#x000FC;rttemberg through bwHPC.</p></ack>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s8">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s9">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Angeloni</surname> <given-names>C. F.</given-names></name> <name><surname>Mlynarski</surname> <given-names>W.</given-names></name> <name><surname>Piasini</surname> <given-names>E.</given-names></name> <name><surname>Williams</surname> <given-names>A. M.</given-names></name> <name><surname>Wood</surname> <given-names>K. C.</given-names></name> <name><surname>Garami</surname> <given-names>L.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Dynamics of cortical contrast adaptation predict perception of signals in noise</article-title>. <source>Nat. Commun</source>. <volume>14</volume>:<fpage>4817</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41467-023-40477-6</pub-id><pub-id pub-id-type="pmid">37558677</pub-id></mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Araujo</surname> <given-names>F. A.</given-names></name> <name><surname>Riou</surname> <given-names>M.</given-names></name> <name><surname>Torrejon</surname> <given-names>J.</given-names></name> <name><surname>Tsunegi</surname> <given-names>S.</given-names></name> <name><surname>Querlioz</surname> <given-names>D.</given-names></name> <name><surname>Yakushiji</surname> <given-names>K.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Role of non-linear data processing on speech recognition task in the framework of reservoir computing</article-title>. <source>Sci. Rep</source>. <volume>10</volume>:<fpage>328</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-019-56991-x</pub-id><pub-id pub-id-type="pmid">31941917</pub-id></mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Bregman</surname> <given-names>A. S.</given-names></name></person-group> (<year>1990</year>). <source>Auditory Scene Analysis: The Perceptual Organization of Sound</source>. <publisher-name>MIT Press</publisher-name>. doi: <pub-id pub-id-type="doi">10.7551/mitpress/1486.001.0001</pub-id></mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Brosch</surname> <given-names>T.</given-names></name> <name><surname>Neumann</surname> <given-names>H.</given-names></name></person-group> (<year>2014</year>). <article-title>Computing with a canonical neural circuits model with pool normalization and modulating feedback</article-title>. <source>Neural Comput</source>. <volume>26</volume>, <fpage>2735</fpage>&#x02013;<lpage>2789</lpage>. doi: <pub-id pub-id-type="doi">10.1162/NECO_a_00675</pub-id><pub-id pub-id-type="pmid">25248083</pub-id></mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chan</surname> <given-names>V. Y.-S.</given-names></name> <name><surname>Jin</surname> <given-names>C. T.</given-names></name> <name><surname>Schaik</surname> <given-names>A. V.</given-names></name></person-group> (<year>2010</year>). <article-title>Adaptive sound localization with a silicon cochlea pair</article-title>. <source>Front. Neurosci</source>. <volume>4</volume>:<fpage>196</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2010.00196</pub-id><pub-id pub-id-type="pmid">21152257</pub-id></mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cherry</surname> <given-names>E. C.</given-names></name></person-group> (<year>1953</year>). <article-title>Some experiments on the recognition of speech, with one and with two ears</article-title>. <source>J. Acoust. Soc. Am</source>. <volume>25</volume>, <fpage>975</fpage>&#x02013;<lpage>979</lpage>. doi: <pub-id pub-id-type="doi">10.1121/1.1907229</pub-id></mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Culling</surname> <given-names>J. F.</given-names></name> <name><surname>Lavandier</surname> <given-names>M.</given-names></name></person-group> (<year>2021</year>). <source>Binaural Unmasking and Spatial Release from Masking</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name>, <fpage>209</fpage>&#x02013;<lpage>241</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-030-57100-9_8</pub-id></mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dahmen</surname> <given-names>J. C.</given-names></name> <name><surname>Keating</surname> <given-names>P.</given-names></name> <name><surname>Nodal</surname> <given-names>F. R.</given-names></name> <name><surname>Schulz</surname> <given-names>A. L.</given-names></name> <name><surname>King</surname> <given-names>A. J.</given-names></name></person-group> (<year>2010</year>). <article-title>Adaptation to stimulus statistics in the perception and neural representation of auditory space</article-title>. <source>Neuron</source> <volume>66</volume>, <fpage>937</fpage>&#x02013;<lpage>948</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuron.2010.05.018</pub-id><pub-id pub-id-type="pmid">20620878</pub-id></mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Darrow</surname> <given-names>K. N.</given-names></name> <name><surname>Maison</surname> <given-names>S. F.</given-names></name> <name><surname>Liberman</surname> <given-names>M. C.</given-names></name></person-group> (<year>2006</year>). <article-title>Cochlear efferent feedback balances interaural sensitivity</article-title>. <source>Nat. Neurosci</source>. <volume>9</volume>, <fpage>1474</fpage>&#x02013;<lpage>1476</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nn1807</pub-id><pub-id pub-id-type="pmid">17115038</pub-id></mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Durstewitz</surname> <given-names>S.</given-names></name> <name><surname>Lenk</surname> <given-names>C.</given-names></name> <name><surname>Ivanov</surname> <given-names>T.</given-names></name> <name><surname>Ziegler</surname> <given-names>M.</given-names></name></person-group> (<year>2024</year>). <article-title>&#x0201C;Emulation of auditory nerve adaptation with bio-inspired acoustic sensor to extract sound features,&#x0201D;</article-title> in <source>2024 22nd IEEE Interregional NEWCAS Conference (NEWCAS)</source> (<publisher-loc>Sherbrooke, QC</publisher-loc>), <fpage>128</fpage>&#x02013;<lpage>132</lpage>. doi: <pub-id pub-id-type="doi">10.1109/NewCAS58973.2024.10666367</pub-id></mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Durstewitz</surname> <given-names>S.</given-names></name> <name><surname>Lenk</surname> <given-names>C.</given-names></name> <name><surname>Ziegler</surname> <given-names>M.</given-names></name></person-group> (<year>2022</year>). <article-title>&#x0201C;Bio-inspired acoustic sensor with gain adaptation enhancing dynamic range and onset detection,&#x0201D;</article-title> in <source>2022 IEEE International Symposium on Circuits and Systems (ISCAS)</source> (<publisher-loc>Austin, TX</publisher-loc>), <fpage>789</fpage>&#x02013;<lpage>793</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ISCAS48785.2022.9937484</pub-id></mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Farhadi</surname> <given-names>A.</given-names></name> <name><surname>Jennings</surname> <given-names>S. G.</given-names></name> <name><surname>Strickland</surname> <given-names>E. A.</given-names></name> <name><surname>Carney</surname> <given-names>L. H.</given-names></name></person-group> (<year>2023</year>). <article-title>Subcortical auditory model including efferent dynamic gain control with inputs from cochlear nucleus and inferior colliculus</article-title>. <source>J. Acoust. Soc. Am.</source> <volume>154</volume>, <fpage>3644</fpage>&#x02013;<lpage>3659</lpage>. doi: <pub-id pub-id-type="doi">10.1121/10.0022578</pub-id><pub-id pub-id-type="pmid">38051523</pub-id></mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Frank</surname> <given-names>M. M.</given-names></name> <name><surname>Goodrich</surname> <given-names>L. V.</given-names></name></person-group> (<year>2018</year>). <article-title>Talking back: development of the olivocochlear efferent system</article-title>. <source>Wiley Interdiscipl. Rev.: Dev. Biol</source>. <volume>7</volume>:<fpage>e324</fpage>. doi: <pub-id pub-id-type="doi">10.1002/wdev.324</pub-id><pub-id pub-id-type="pmid">29944783</pub-id></mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Glackin</surname> <given-names>B.</given-names></name> <etal/></person-group>. (<year>2010</year>). <article-title>A spiking neural network model of the medial superior olive using spike timing dependent plasticity for sound localization</article-title>. <source>Front. Comput. Neurosci</source>. <volume>4</volume>:<fpage>18</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fncom.2010.00018</pub-id></mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gleiss</surname> <given-names>H.</given-names></name> <name><surname>Encke</surname> <given-names>J.</given-names></name> <name><surname>Lingner</surname> <given-names>A.</given-names></name> <name><surname>Jennings</surname> <given-names>T. R.</given-names></name> <name><surname>Brosel</surname> <given-names>S.</given-names></name> <name><surname>Kunz</surname> <given-names>L.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Cooperative population coding facilitates efficient sound-source separability by adaptation to input statistics</article-title>. <source>PLoS Biol</source>. <volume>17</volume>:<fpage>e3000150</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pbio.3000150</pub-id><pub-id pub-id-type="pmid">31356637</pub-id></mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Guinan</surname> <given-names>J. J.</given-names></name></person-group> (<year>2018</year>). <article-title>Olivocochlear efferents: their action, effects, measurement and uses, and the impact of the new conception of cochlear mechanical responses</article-title>. <source>Hear. Res</source>. <volume>362</volume>, <fpage>38</fpage>&#x02013;<lpage>47</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.heares.2017.12.012</pub-id><pub-id pub-id-type="pmid">29291948</pub-id></mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Guinan</surname> <given-names>J. J. Jr.</given-names></name></person-group> (<year>2006</year>). <article-title>Olivocochlear efferents: anatomy, physiology, function, and the measurement of efferent effects in humans</article-title>. <source>Ear Hear</source>. <volume>27</volume>, <fpage>589</fpage>&#x02013;<lpage>607</lpage>. doi: <pub-id pub-id-type="doi">10.1097/01.aud.0000240507.83072.e7</pub-id><pub-id pub-id-type="pmid">17086072</pub-id></mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Hamilton</surname> <given-names>T. J.</given-names></name> <name><surname>Tapson</surname> <given-names>J.</given-names></name> <name><surname>Jin</surname> <given-names>C.</given-names></name> <name><surname>Van Schaik</surname> <given-names>A.</given-names></name></person-group> (<year>2008</year>). <article-title>&#x0201C;Analogue vlsi implementations of two dimensional, nonlinear, active cochlea models,&#x0201D;</article-title> in <source>2008 IEEE Biomedical Circuits and Systems Conference</source> (<publisher-loc>Baltimore, MD</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>153</fpage>&#x02013;<lpage>156</lpage>. doi: <pub-id pub-id-type="doi">10.1109/BIOCAS.2008.4696897</pub-id></mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jim&#x000E9;nez-Fernandez</surname> <given-names>A.</given-names></name> <name><surname>Cerezuela-Escudero</surname> <given-names>E.</given-names></name> <name><surname>Mir&#x000F3;-Amarante</surname> <given-names>L.</given-names></name> <name><surname>Dom&#x000ED;nguez-Morales</surname> <given-names>M. J.</given-names></name> <name><surname>G&#x000F3;mez-Rodr&#x000ED;guez</surname> <given-names>F. A.</given-names></name> <name><surname>Linares-Barranco</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>A binaural neuromorphic auditory sensor for FPGA: a spike signal processing approach</article-title>. <source>IEEE Trans. Neural Netw. Learn. Syst</source>. <volume>28</volume>, <fpage>804</fpage>&#x02013;<lpage>818</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TNNLS.2016.2583223</pub-id><pub-id pub-id-type="pmid">27479979</pub-id></mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Johann Rolf</surname> <given-names>H. F.</given-names></name> <name><surname>Meurer</surname> <given-names>T.</given-names></name></person-group> (<year>2023</year>). <article-title>&#x0201C;Amplitude control for an artificial hair cell undergoing an andronov-hopf bifurcation,&#x0201D;</article-title> in <source>IFAC-PapersOnLine, 12th IFAC Symposium on Nonlinear Control Systems NOLCOS 2022</source>, <volume>56</volume>, <fpage>181</fpage>&#x02013;<lpage>186</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ifacol.2023.02.031</pub-id></mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Johny</surname> <given-names>S.</given-names></name> <name><surname>Ved</surname> <given-names>K.</given-names></name> <name><surname>Durstewitz</surname> <given-names>S.</given-names></name> <name><surname>Ivanov</surname> <given-names>T.</given-names></name> <name><surname>Ziegler</surname> <given-names>M.</given-names></name> <name><surname>Lenk</surname> <given-names>C.</given-names></name></person-group> (<year>2024</year>). <article-title>&#x0201C;Bio-inspired sensor-based sound pre-processing for speech recognition in noisy conditions,&#x0201D;</article-title> in <source>2024 IEEE Biomedical Circuits and Systems Conference (BioCAS)</source> (<publisher-loc>Xi&#x00027;an</publisher-loc>), <fpage>15</fpage>. doi: <pub-id pub-id-type="doi">10.1109/BioCAS61083.2024.10798207</pub-id></mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Khalighinejad</surname> <given-names>B.</given-names></name> <name><surname>Herrero</surname> <given-names>J. L.</given-names></name> <name><surname>Mehta</surname> <given-names>A. D.</given-names></name> <name><surname>Mesgarani</surname> <given-names>N.</given-names></name></person-group> (<year>2019</year>). <article-title>Adaptation of the human auditory cortex to changing background noise</article-title>. <source>Nat. Commun</source>. <volume>10</volume>:<fpage>2509</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41467-019-10611-4</pub-id><pub-id pub-id-type="pmid">31175304</pub-id></mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Khalil</surname> <given-names>H. K.</given-names></name></person-group> (<year>2014</year>). <source>Nonlinear Systems, 3rd Edn</source>. <publisher-loc>Pearson new international edition. Essex</publisher-loc>: <publisher-name>Pearson Education Limited</publisher-name>.</mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kidd</surname> <given-names>G.</given-names></name> <name><surname>Arbogast</surname> <given-names>T. L.</given-names></name> <name><surname>Mason</surname> <given-names>C. R.</given-names></name> <name><surname>Gallun</surname> <given-names>F. J.</given-names></name></person-group> (<year>2005</year>). <article-title>The advantage of knowing where to listen</article-title>. <source>J. Acoust. Soc. Am</source>. <volume>118</volume>, <fpage>3804</fpage>&#x02013;<lpage>3815</lpage>. doi: <pub-id pub-id-type="doi">10.1121/1.2109187</pub-id><pub-id pub-id-type="pmid">16419825</pub-id></mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Kiselev</surname> <given-names>I.</given-names></name> <name><surname>Liu</surname> <given-names>S.-C.</given-names></name></person-group> (<year>2021</year>). <article-title>&#x0201C;Event-driven local gain control on a spiking cochlea sensor,&#x0201D;</article-title> in <source>2021 IEEE International Symposium on Circuits and Systems (ISCAS)</source> (<publisher-loc>Daegu</publisher-loc>), <fpage>15</fpage>. doi: <pub-id pub-id-type="doi">10.1109/ISCAS51556.2021.9401742</pub-id></mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kumar</surname> <given-names>S.</given-names></name> <name><surname>Charan</surname> <given-names>G.</given-names></name> <name><surname>Gounipuram</surname> <given-names>S.</given-names></name> <name><surname>Sarma</surname> <given-names>M.</given-names></name></person-group> (<year>2024</year>). <source>Unveiling the Challenges of Speech Recognition in Noisy Environments: A Comprehensive Review of Issues and Solutions</source>. doi: <pub-id pub-id-type="doi">10.13140/RG.2.2.24231.76966</pub-id></mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lenk</surname> <given-names>C.</given-names></name> <name><surname>H&#x000F6;vel</surname> <given-names>P.</given-names></name> <name><surname>Ved</surname> <given-names>K.</given-names></name> <name><surname>Durstewitz</surname> <given-names>S.</given-names></name> <name><surname>Meurer</surname> <given-names>T.</given-names></name> <name><surname>Fritsch</surname> <given-names>T.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Neuromorphic acoustic sensing using an adaptive microelectromechanical cochlea with integrated feedback</article-title>. <source>Nat. Electr</source>. <volume>6</volume>, <fpage>370</fpage>&#x02013;<lpage>380</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41928-023-00957-5</pub-id></mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lingner</surname> <given-names>A.</given-names></name> <name><surname>Pecka</surname> <given-names>M.</given-names></name> <name><surname>Leibold</surname> <given-names>C.</given-names></name> <name><surname>Grothe</surname> <given-names>B.</given-names></name></person-group> (<year>2018</year>). <article-title>A novel concept for dynamic adjustment of auditory space</article-title>. <source>Sci. Rep</source>. <volume>8</volume>:<fpage>8335</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-018-26690-0</pub-id><pub-id pub-id-type="pmid">29844516</pub-id></mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>S.-C.</given-names></name> <name><surname>van Schaik</surname> <given-names>A.</given-names></name> <name><surname>Minch</surname> <given-names>B. A.</given-names></name> <name><surname>Delbruck</surname> <given-names>T.</given-names></name></person-group> (<year>2014</year>). <article-title>Asynchronous binaural spatial audition sensor with 2 &#x000D7; 64 &#x000D7; 4 channel output</article-title>. <source>IEEE Trans. Biomed. Circuits Syst</source>. <volume>8</volume>, <fpage>453</fpage>&#x02013;<lpage>464</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TBCAS.2013.2281834</pub-id><pub-id pub-id-type="pmid">24216772</pub-id></mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lohmiller</surname> <given-names>W.</given-names></name> <name><surname>Slotine</surname> <given-names>J.-J. E.</given-names></name></person-group> (<year>1998</year>). <article-title>On contraction analysis for non-linear systems</article-title>. <source>Automatica</source> <volume>34</volume>, <fpage>683</fpage>&#x02013;<lpage>696</lpage>. doi: <pub-id pub-id-type="doi">10.1016/S0005-1098(98)00019-3</pub-id></mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lopez-Poveda</surname> <given-names>E. A.</given-names></name></person-group> (<year>2018</year>). <article-title>Olivocochlear efferents in animals and humans: from anatomy to clinical relevance</article-title>. <source>Front. Neurol</source>. <volume>9</volume>:<fpage>197</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fneur.2018.00197</pub-id><pub-id pub-id-type="pmid">29632514</pub-id></mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lopez-Poveda</surname> <given-names>E. A.</given-names></name> <name><surname>Eustaquio-Mart&#x000ED;n</surname> <given-names>A.</given-names></name> <name><surname>Stohl</surname> <given-names>J. S.</given-names></name> <name><surname>Wolford</surname> <given-names>R. D.</given-names></name> <name><surname>Schatzer</surname> <given-names>R.</given-names></name> <name><surname>Gorospe</surname> <given-names>J. M.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Intelligibility in speech maskers with a binaural cochlear implant sound coding strategy inspired by the contralateral medial olivocochlear reflex</article-title>. <source>Hear. Res</source>. <volume>348</volume>, <fpage>134</fpage>&#x02013;<lpage>137</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.heares.2017.02.003</pub-id><pub-id pub-id-type="pmid">28188882</pub-id></mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mawalim</surname> <given-names>C. O.</given-names></name> <name><surname>Okada</surname> <given-names>S.</given-names></name> <name><surname>Unoki</surname> <given-names>M.</given-names></name></person-group> (<year>2024</year>). <article-title>Are recent deep learning-based speech enhancement methods ready to confront real-world noisy environments?</article-title> <source>Interspeech</source> <volume>2024</volume>, <fpage>1735</fpage>&#x02013;<lpage>1739</lpage>. doi: <pub-id pub-id-type="doi">10.21437/Interspeech.2024-129</pub-id></mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>McDermott</surname> <given-names>J. H.</given-names></name></person-group> (<year>2009</year>). <article-title>The cocktail party problem</article-title>. <source>Curr. Biol</source>. <volume>19</volume>, <fpage>R1024</fpage>&#x02013;<lpage>R1027</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cub.2009.09.005</pub-id></mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Moro</surname> <given-names>F.</given-names></name> <name><surname>Hardy</surname> <given-names>E.</given-names></name> <name><surname>Fain</surname> <given-names>B.</given-names></name> <name><surname>Dalgaty</surname> <given-names>T.</given-names></name> <name><surname>Cl&#x000E9;men&#x000E7;on</surname> <given-names>P.</given-names></name> <name><surname>De Pr&#x000E0;</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Neuromorphic object localization using resistive memories and ultrasonic transducers</article-title>. <source>Nat. Commun</source>. <volume>13</volume>:<fpage>3506</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41467-022-31157-y</pub-id><pub-id pub-id-type="pmid">35717413</pub-id></mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nouri</surname> <given-names>M.</given-names></name> <name><surname>Ahmadi</surname> <given-names>A.</given-names></name> <name><surname>Alirezaee</surname> <given-names>S.</given-names></name> <name><surname>Karimi</surname> <given-names>G.</given-names></name> <name><surname>Ahmadi</surname> <given-names>M.</given-names></name> <name><surname>Abbott</surname> <given-names>D.</given-names></name></person-group> (<year>2015</year>). <article-title>A Hopf resonator for 2-D artificial cochlea: piecewise linear model and digital implementation</article-title>. <source>IEEE Trans. Circuits Syst. I: Regul. Pap</source>. <volume>62</volume>, <fpage>1117</fpage>&#x02013;<lpage>1125</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TCSI.2015.2390555</pub-id></mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Oess</surname> <given-names>T.</given-names></name> <name><surname>Ernst</surname> <given-names>M. O.</given-names></name> <name><surname>Neumann</surname> <given-names>H.</given-names></name></person-group> (<year>2020a</year>). <article-title>Computational principles of neural adaptation for binaural signal integration</article-title>. <source>PLoS Comput. Biol</source>. <volume>16</volume>:<fpage>e1008020</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pcbi.1008020</pub-id><pub-id pub-id-type="pmid">32678847</pub-id></mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Oess</surname> <given-names>T.</given-names></name> <name><surname>L&#x000F6;hr</surname> <given-names>M.</given-names></name> <name><surname>Jarvers</surname> <given-names>C.</given-names></name> <name><surname>Schmid</surname> <given-names>D.</given-names></name> <name><surname>Neumann</surname> <given-names>H.</given-names></name></person-group> (<year>2020b</year>). <article-title>&#x0201C;A bio-inspired model of sound source localization on neuromorphic hardware,&#x0201D;</article-title> in <source>2020 2nd IEEE International Conference on Artificial Intelligence Circuits and Systems (AICAS)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>103</fpage>&#x02013;<lpage>107</lpage>. doi: <pub-id pub-id-type="doi">10.1109/AICAS48895.2020.9073935</pub-id></mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Papesh</surname> <given-names>M. A.</given-names></name> <name><surname>Folmer</surname> <given-names>R. L.</given-names></name> <name><surname>Gallun</surname> <given-names>F. J.</given-names></name></person-group> (<year>2017</year>). <article-title>Cortical measures of binaural processing predict spatial release from masking performance</article-title>. <source>Front. Hum. Neurosci</source>. <volume>11</volume>:<fpage>124</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnhum.2017.00124</pub-id><pub-id pub-id-type="pmid">28377706</pub-id></mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Patman</surname> <given-names>C.</given-names></name> <name><surname>Chodroff</surname> <given-names>E.</given-names></name></person-group> (<year>2024</year>). <article-title>Speech recognition in adverse conditions by humans and machines</article-title>. <source>JASA Express Letters</source> <volume>4</volume>:<fpage>115204</fpage>. doi: <pub-id pub-id-type="doi">10.1121/10.0032473</pub-id><pub-id pub-id-type="pmid">39531098</pub-id></mixed-citation>
</ref>
<ref id="B41">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rangelow</surname> <given-names>I. W.</given-names></name> <name><surname>Ivanov</surname> <given-names>T.</given-names></name> <name><surname>Ahmad</surname> <given-names>A.</given-names></name> <name><surname>Kaestner</surname> <given-names>M.</given-names></name> <name><surname>Lenk</surname> <given-names>C.</given-names></name> <name><surname>Bozchalooi</surname> <given-names>I. S.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Review article: active scanning probes: a versatile toolkit for fast imaging and emerging nanofabrication</article-title>. <source>J. Vacuum Sci. Technol. B</source> <volume>35</volume>:<fpage>06G101</fpage>. doi: <pub-id pub-id-type="doi">10.1116/1.4992073</pub-id></mixed-citation>
</ref>
<ref id="B42">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Reeve</surname> <given-names>R.</given-names></name> <name><surname>Webb</surname> <given-names>B.</given-names></name> <name><surname>Horchler</surname> <given-names>A.</given-names></name> <name><surname>Indiveri</surname> <given-names>G.</given-names></name> <name><surname>Quinn</surname> <given-names>R.</given-names></name></person-group> (<year>2005</year>). <article-title>New technologies for testing a model of cricket phonotaxis on an outdoor robot</article-title>. <source>Robot. Autonom. Syst</source>. <volume>51</volume>, <fpage>41</fpage>&#x02013;<lpage>54</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.robot.2004.08.010</pub-id></mixed-citation>
</ref>
<ref id="B43">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Roeser</surname> <given-names>D.</given-names></name> <name><surname>Gutschmidt</surname> <given-names>S.</given-names></name> <name><surname>Sattel</surname> <given-names>T.</given-names></name> <name><surname>Rangelow</surname> <given-names>I. W.</given-names></name></person-group> (<year>2016</year>). <article-title>Tip motion&#x02013;sensor signal relation for a composite SPM/SPL cantilever</article-title>. <source>J. Microelectromech. Syst</source>. <volume>25</volume>, <fpage>78</fpage>&#x02013;<lpage>90</lpage>. doi: <pub-id pub-id-type="doi">10.1109/JMEMS.2015.2482389</pub-id></mixed-citation>
</ref>
<ref id="B44">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schmid</surname> <given-names>D.</given-names></name> <name><surname>Oess</surname> <given-names>T.</given-names></name> <name><surname>Neumann</surname> <given-names>H.</given-names></name></person-group> (<year>2023</year>). <article-title>Listen to the brain-auditory sound source localization in neuromorphic computing architectures</article-title>. <source>Sensors</source> <volume>23</volume>:<fpage>4451</fpage>. doi: <pub-id pub-id-type="doi">10.3390/s23094451</pub-id><pub-id pub-id-type="pmid">37177655</pub-id></mixed-citation>
</ref>
<ref id="B45">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schoepe</surname> <given-names>T.</given-names></name> <name><surname>Gutierrez-Galan</surname> <given-names>D.</given-names></name> <name><surname>Dominguez-Morales</surname> <given-names>J. P.</given-names></name> <name><surname>Greatorex</surname> <given-names>H.</given-names></name> <name><surname>Jimenez-Fernandez</surname> <given-names>A.</given-names></name> <name><surname>Linares-Barranco</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Closed-loop sound source localization in neuromorphic systems</article-title>. <source>Neuromorph. Comput. Eng</source>. <volume>3</volume>:<fpage>024009</fpage>. doi: <pub-id pub-id-type="doi">10.1088/2634-4386/acdaba</pub-id></mixed-citation>
</ref>
<ref id="B46">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>R. K.</given-names></name> <name><surname>Xu</surname> <given-names>Y.</given-names></name> <name><surname>Wang</surname> <given-names>R.</given-names></name> <name><surname>Hamilton</surname> <given-names>T. J.</given-names></name> <name><surname>Denham</surname> <given-names>S. L.</given-names></name> <name><surname>van Schaik</surname> <given-names>A.</given-names></name></person-group> (<year>2019</year>). <article-title>CAR-Lite: a multi-rate cochlear model on FPGA for spike-based sound encoding</article-title>. <source>IEEE Trans. Circuits Syst. I: Regul. Pap</source>. <volume>66</volume>, <fpage>1805</fpage>&#x02013;<lpage>1817</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TCSI.2018.2868247</pub-id></mixed-citation>
</ref>
<ref id="B47">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Stange</surname> <given-names>A.</given-names></name> <name><surname>Myoga</surname> <given-names>M. H.</given-names></name> <name><surname>Lingner</surname> <given-names>A.</given-names></name> <name><surname>Ford</surname> <given-names>M. C.</given-names></name> <name><surname>Alexandrova</surname> <given-names>O.</given-names></name> <name><surname>Felmy</surname> <given-names>F.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Adaptation in sound localization: from GABAB receptor-mediated synaptic modulation to perception</article-title>. <source>Nat. Neurosci</source>. <volume>16</volume>, <fpage>1840</fpage>&#x02013;<lpage>1847</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nn.3548</pub-id></mixed-citation>
</ref>
<ref id="B48">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sussillo</surname> <given-names>D.</given-names></name> <name><surname>Barak</surname> <given-names>O.</given-names></name></person-group> (<year>2013</year>). <article-title>Opening the black box: low-dimensional dynamics in high-dimensional recurrent neural networks</article-title>. <source>Neural Comput</source>. <volume>25</volume>, <fpage>626</fpage>&#x02013;<lpage>649</lpage>. doi: <pub-id pub-id-type="doi">10.1162/NECO_a_00409</pub-id><pub-id pub-id-type="pmid">23272922</pub-id></mixed-citation>
</ref>
<ref id="B49">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Thakur</surname> <given-names>C. S.</given-names></name> <name><surname>Hamilton</surname> <given-names>T. J.</given-names></name> <name><surname>Tapson</surname> <given-names>J.</given-names></name> <name><surname>van Schaik</surname> <given-names>A.</given-names></name> <name><surname>Lyon</surname> <given-names>R. F.</given-names></name></person-group> (<year>2014</year>). <article-title>&#x0201C;FPGA implementation of the CAR Model of the cochlea,&#x0201D;</article-title> in <source>2014 IEEE International Symposium on Circuits and Systems (ISCAS)</source> (<publisher-loc>Melbourne, VIC</publisher-loc>), <fpage>1853</fpage>&#x02013;<lpage>1856</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ISCAS.2014.6865519</pub-id></mixed-citation>
</ref>
<ref id="B50">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tollin</surname> <given-names>D. J.</given-names></name></person-group> (<year>2003</year>). <article-title>The lateral superior olive: a functional role in sound source localization</article-title>. <source>Neuroscientist</source> <volume>9</volume>, <fpage>127</fpage>&#x02013;<lpage>143</lpage>. doi: <pub-id pub-id-type="doi">10.1177/1073858403252228</pub-id><pub-id pub-id-type="pmid">12708617</pub-id></mixed-citation>
</ref>
<ref id="B51">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tsukamoto</surname> <given-names>H.</given-names></name> <name><surname>Chung</surname> <given-names>S.-J.</given-names></name> <name><surname>Slotine</surname> <given-names>J.-J. E.</given-names></name></person-group> (<year>2021</year>). <article-title>Contraction theory for nonlinear stability analysis and learning-based control: a tutorial overview</article-title>. <source>Annu. Rev. Control</source> <volume>52</volume>, <fpage>135</fpage>&#x02013;<lpage>169</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.arcontrol.2021.10.001</pub-id></mixed-citation>
</ref>
<ref id="B52">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>van Schaik</surname> <given-names>A.</given-names></name> <name><surname>Liu</surname> <given-names>S.-C.</given-names></name></person-group> (<year>2005</year>). <article-title>&#x0201C;AER EAR: a matched silicon cochlea pair with address event representation interface,&#x0201D;</article-title> in <source>2005 IEEE International Symposium on Circuits and Systems (ISCAS), Vol. 5</source> (<publisher-loc>Kobe</publisher-loc>), <fpage>4213</fpage>&#x02013;<lpage>4216</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ISCAS.2005.1465560</pub-id></mixed-citation>
</ref>
<ref id="B53">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ved</surname> <given-names>K.</given-names></name> <name><surname>Lenk</surname> <given-names>C.</given-names></name> <name><surname>Ivanov</surname> <given-names>T.</given-names></name> <name><surname>H&#x000F6;vel</surname> <given-names>P.</given-names></name> <name><surname>Ziegler</surname> <given-names>M.</given-names></name></person-group> (<year>2024</year>). <article-title>Bio-inspired, adaptive acoustic sensor: sensing properties in dependence of feedback parameters</article-title>. <source>AIP Conf. Proc</source>. <volume>3062</volume>:<fpage>040011</fpage>. doi: <pub-id pub-id-type="doi">10.1063/5.0189488</pub-id></mixed-citation>
</ref>
<ref id="B54">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>S.</given-names></name> <name><surname>Koickal</surname> <given-names>T. J.</given-names></name> <name><surname>Hamilton</surname> <given-names>A.</given-names></name> <name><surname>Cheung</surname> <given-names>R.</given-names></name> <name><surname>Smith</surname> <given-names>L. S.</given-names></name></person-group> (<year>2015</year>). <article-title>A bio-realistic analog CMOS cochlea filter with high tunability and ultra-steep roll-off</article-title>. <source>IEEE Trans. Biomed. Circ. Syst</source>. <volume>9</volume>, <fpage>297</fpage>&#x02013;<lpage>311</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TBCAS.2014.2328321</pub-id><pub-id pub-id-type="pmid">25099631</pub-id></mixed-citation>
</ref>
<ref id="B55">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>W.</given-names></name> <name><surname>Pedretti</surname> <given-names>G.</given-names></name> <name><surname>Milo</surname> <given-names>V.</given-names></name> <name><surname>Carboni</surname> <given-names>R.</given-names></name> <name><surname>Calderoni</surname> <given-names>A.</given-names></name> <name><surname>Ramaswamy</surname> <given-names>N.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Learning of spatiotemporal patterns in a spiking neural network with resistive switching synapses</article-title>. <source>Sci. Adv</source>. <volume>4</volume>:<fpage>eaat4752</fpage>. doi: <pub-id pub-id-type="doi">10.1126/sciadv.aat4752</pub-id><pub-id pub-id-type="pmid">30214936</pub-id></mixed-citation>
</ref>
<ref id="B56">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ware</surname> <given-names>E. E.</given-names></name> <name><surname>Roberts</surname> <given-names>M. T.</given-names></name> <name><surname>Flynn</surname> <given-names>M. P.</given-names></name></person-group> (<year>2025</year>). <article-title>A multi-stage auditory model for binaural sound localization using the locally competitive algorithm</article-title>. <source>Sci. Rep</source>. <volume>15</volume>:<fpage>27048</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-025-11613-7</pub-id><pub-id pub-id-type="pmid">40715443</pub-id></mixed-citation>
</ref>
<ref id="B57">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Willmore</surname> <given-names>B. D. B.</given-names></name> <name><surname>King</surname> <given-names>A. J.</given-names></name></person-group> (<year>2023</year>). <article-title>Adaptation in auditory processing</article-title>. <source>Physiol. Rev</source>. <volume>103</volume>, <fpage>1025</fpage>&#x02013;<lpage>1058</lpage>. doi: <pub-id pub-id-type="doi">10.1152/physrev.00011.2022</pub-id><pub-id pub-id-type="pmid">36049112</pub-id></mixed-citation>
</ref>
<ref id="B58">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>Y.</given-names></name> <name><surname>Afshar</surname> <given-names>S.</given-names></name> <name><surname>Singh</surname> <given-names>R. K.</given-names></name> <name><surname>Hamilton</surname> <given-names>T. J.</given-names></name> <name><surname>Wang</surname> <given-names>R.</given-names></name> <name><surname>Van Schaik</surname> <given-names>A.</given-names></name></person-group> (<year>2018a</year>). <article-title>&#x0201C;A machine hearing system for binaural sound localization based on instantaneous correlation,&#x0201D;</article-title> in <source>2018 IEEE International Symposium on Circuits and Systems (ISCAS)</source> (<publisher-loc>Florence</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x02013;<lpage>5</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ISCAS.2018.8351367</pub-id></mixed-citation>
</ref>
<ref id="B59">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>Y.</given-names></name> <name><surname>Thakur</surname> <given-names>C. S.</given-names></name> <name><surname>Singh</surname> <given-names>R. K.</given-names></name> <name><surname>Hamilton</surname> <given-names>T. J.</given-names></name> <name><surname>Wang</surname> <given-names>R. M.</given-names></name> <name><surname>van Schaik</surname> <given-names>A.</given-names></name></person-group> (<year>2018b</year>). <article-title>A FPGA implementation of the CAR-FAC cochlear model</article-title>. <source>Front. Neurosci</source>. <volume>12</volume>:<fpage>198</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2018.00198</pub-id><pub-id pub-id-type="pmid">29692700</pub-id></mixed-citation>
</ref>
<ref id="B60">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Yang</surname> <given-names>M.</given-names></name> <name><surname>Chien</surname> <given-names>C.-H.</given-names></name> <name><surname>Delbruck</surname> <given-names>T.</given-names></name> <name><surname>Liu</surname> <given-names>S.-C.</given-names></name></person-group> (<year>2016</year>). <article-title>&#x0201C;A 0.5V 55&#x003BC;W 64 &#x000D7; 2-channel binaural silicon cochlea for event-driven stereo-audio sensing,&#x0201D;</article-title> in <source>2016 IEEE International Solid-State Circuits Conference (ISSCC)</source> (<publisher-loc>San Francisco, CA</publisher-loc>), <fpage>388</fpage>&#x02013;<lpage>389</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ISSCC.2016.7418070</pub-id></mixed-citation>
</ref>
<ref id="B61">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>D.</given-names></name> <name><surname>Wang</surname> <given-names>S.</given-names></name> <name><surname>Belatreche</surname> <given-names>A.</given-names></name> <name><surname>Wei</surname> <given-names>W.</given-names></name> <name><surname>Xiao</surname> <given-names>Y.</given-names></name> <name><surname>Zheng</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Spike-based neuromorphic model for sound source localization</article-title>. <source>Adv. Neural Inf. Process. Syst.</source> <volume>37</volume>, <fpage>113911</fpage>&#x02013;<lpage>113936</lpage>. doi: <pub-id pub-id-type="doi">10.52202/079017-3617</pub-id></mixed-citation>
</ref>
<ref id="B62">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>H.-B.</given-names></name> <name><surname>Liu</surname> <given-names>L.-M.</given-names></name> <name><surname>Yu</surname> <given-names>N.</given-names></name> <name><surname>Zhu</surname> <given-names>Y.</given-names></name> <name><surname>Mei</surname> <given-names>L.</given-names></name> <name><surname>Chen</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Efferent neurons control hearing sensitivity and protect hearing from noise through the regulation of gap junctions between cochlear supporting cells</article-title>. <source>J. Neurophysiol</source>. <volume>127</volume>, <fpage>313</fpage>&#x02013;<lpage>327</lpage>. doi: <pub-id pub-id-type="doi">10.1152/jn.00468.2021</pub-id><pub-id pub-id-type="pmid">34907797</pub-id></mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/492832/overview">Tamas Harczos</ext-link>, University of Applied Sciences Erfurt, Germany</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3266732/overview">Farzaneh Darki</ext-link>, University of Exeter, United Kingdom</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3266766/overview">Hrishikesh Gosavi</ext-link>, Trane Technologies, United States</p>
</fn>
</fn-group>
</back>
</article>