<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="research-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neurosci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neurosci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1662-453X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnins.2026.1605209</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Spiking neural networks provide accurate and time-efficient models for whisker stimulus classification of the awake mouse</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Albrecht</surname>
<given-names>Steffen</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<xref ref-type="author-notes" rid="fn0001"><sup>&#x2020;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1649020"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Vandevelde</surname>
<given-names>Jens R.</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Vecchi</surname>
<given-names>Edoardo</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Berra</surname>
<given-names>Gabriele</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Bassetti</surname>
<given-names>Davide</given-names>
</name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3248561"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>St&#x00FC;ttgen</surname>
<given-names>Maik C.</given-names>
</name>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/39891"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Luhmann</surname>
<given-names>Heiko J.</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/3546"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Horenko</surname>
<given-names>Illia</given-names>
</name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition/">Funding acquisition</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
</contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Institute of Physiology, University Medical Center of the Johannes Gutenberg University Mainz</institution>, <city>Mainz</city>, <country country="DE">Germany</country></aff>
<aff id="aff2"><label>2</label><institution>Undergraduate Education (FBDTI), Department of Innovative Technologies, University of Applied Sciences and Arts of Southern Switzerland</institution>, <city>Lugano-Viganello</city>, <country country="CH">Switzerland</country></aff>
<aff id="aff3"><label>3</label><institution>Department of Mathematics, Artificial Intelligence in Mathematics, TU Kaiserslautern</institution>, <city>Kaiserslautern</city>, <country country="DE">Germany</country></aff>
<aff id="aff4"><label>4</label><institution>Institute of Pathophysiology, University Medical Center of the Johannes Gutenberg University Mainz</institution>, <city>Mainz</city>, <country country="DE">Germany</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Steffen Albrecht, <email xlink:href="mailto:salbrec@uni-mainz.de">salbrec@uni-mainz.de</email>; Maik C. St&#x00FC;ttgen, <email xlink:href="mailto:maik.stuettgen@uni-mainz.de">maik.stuettgen@uni-mainz.de</email>; Heiko J. Luhmann, <email xlink:href="mailto:luhmann@uni-mainz.de">luhmann@uni-mainz.de</email>; Illia Horenko, <email xlink:href="mailto:horenko@rptu.de">horenko@rptu.de</email></corresp>
<fn fn-type="present-address" id="fn0001">
<label>&#x2020;</label>
<p>Present address: Steffen Albrecht, Department of General Practice and Primary Healthcare, University of Auckland, Auckland, New Zealand</p>
</fn>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-04-01">
<day>01</day>
<month>04</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>20</volume>
<elocation-id>1605209</elocation-id>
<history>
<date date-type="received">
<day>03</day>
<month>04</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>05</day>
<month>03</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>13</day>
<month>03</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Albrecht, Vandevelde, Vecchi, Berra, Bassetti, St&#x00FC;ttgen, Luhmann and Horenko.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Albrecht, Vandevelde, Vecchi, Berra, Bassetti, St&#x00FC;ttgen, Luhmann and Horenko</copyright-holder>
<license>
<ali:license_ref start_date="2026-04-01">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Machine learning algorithms have great potential for classifying brain activity, and lightweight classifier algorithms, requiring little computational resources, can be used on low-energy neuromorphic hardware designed for implantable neuroprosthetics. One of these efficient algorithms, the Liquid State Machine, implements the concept of Spiking Neural Networks and has been shown to achieve outstanding results on the task of whisker stimulus detection from the mouse barrel cortex, a widely used model system. While this is promising for neuroprosthetics, it has been unclear how a Spiking Neural Network or other machine learning algorithms perform on data recorded from awake mice and how trained models generalize across individuals, the latter being relevant to transferring trained models to new hardware. Using laminar multi-electrode local field potential recordings obtained from four mice performing a single-whisker detection task, we benchmarked the performance of a collection of lightweight classification algorithms. We found that the Liquid State Machine, a generalized linear model, and the time series classifier ROCKET are the most accurate for stimulus detection. Among those, the Liquid State Machine achieved the fastest model training and inference runtime and provided robust accuracy across individual mice. Additional analyses show that there is no significant improvement in using multiple cortical layers as input for the model and that 40&#x202F;ms of stimulus recording is sufficient to maintain high detection accuracy.</p>
</abstract>
<kwd-group>
<kwd>electrophysiology</kwd>
<kwd>local field potential</kwd>
<kwd>machine learning</kwd>
<kwd>neural prosthesis</kwd>
<kwd>neuroprosthetics</kwd>
<kwd>spiking neural networks</kwd>
<kwd>whisker stimulus classification</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was received for this work and/or its publication. We gratefully acknowledge the funding from the Carl-Zeiss Foundation (0563-2.8/738/2) initiative &#x201C;Emergent Algorithmic Intelligence.&#x201D; This work was supported by grants from the Deutsche Forschungsgemeinschaft (LU375/11-1 to HL and STU544/3-1 and STU544/4-1 to MS).</funding-statement>
</funding-group>
<counts>
<fig-count count="5"/>
<table-count count="1"/>
<equation-count count="0"/>
<ref-count count="76"/>
<page-count count="13"/>
<word-count count="11759"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Neuroprosthetics</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<title>Introduction</title>
<p>The development and improvement of brain-machine interfaces (BMIs) has advanced considerably during the last decades, enabling brain signals to be processed for the control of an external computer or machine (<xref ref-type="bibr" rid="ref42">Rapeaux and Constandinou, 2021</xref>; <xref ref-type="bibr" rid="ref54">Sitaram et al., 2017</xref>). BMIs help paralyzed patients suffering from severe brain damage or disrupted connectivity between different brain regions, as typically caused by, for example, spinal cord damage, stroke, or neurodegenerative diseases such as amyotrophic lateral sclerosis (<xref ref-type="bibr" rid="ref53">Semprini et al., 2018</xref>). BMIs can read out and interpret brain activity to control external actuators such as robotic arms or to function as a spelling interface, thus enabling patients to write text and communicate with their social environment (<xref ref-type="bibr" rid="ref45">Rezeika et al., 2018</xref>; <xref ref-type="bibr" rid="ref62">Sunny et al., 2016</xref>).</p>
<p>Instead of turning neural activity into an action performed by a computer or machine, the interface can also create a signal to be sent back to the brain. This would result in a closed-loop system that creates artificial stimulation for the brain based on the signal it receives from it. Such systems are called brain-machine-brain interfaces (BMBIs) and are promising for neural prostheses to replace impaired or even missing biological functionality (<xref ref-type="bibr" rid="ref26">Krucoff et al., 2016</xref>). For instance, it has been shown that such closed-loop systems can bridge damaged neural pathways in the rat brain after traumatic brain injury by interpreting action potentials captured by multielectrode arrays (MEA) implanted in the cerebral cortex (<xref ref-type="bibr" rid="ref21">Guggenmos et al., 2013</xref>).</p>
<p>MEAs present several advantages when considered for a neural prosthesis, as the shape of the MEA probe can be tailored to each individual case and is flexible enough to adapt to different brain regions. They provide electrophysiological recordings at a high spatiotemporal resolution that can be further processed differently. One option is to use the local field potential (LFP), i.e., the down-sampled and low-pass filtered raw signal, which includes frequency components below 300&#x202F;Hz (<xref ref-type="bibr" rid="ref39">Petschenig et al., 2022</xref>). The LFP reflects the gross spatially weighted average of membrane potential fluctuations of thousands of neurons within a few hundred microns around the electrode (<xref ref-type="bibr" rid="ref9">Buzs&#x00E1;ki et al., 2012</xref>). Another option is to extract action potentials or spikes from the high-frequency signal, usually band-pass filtered between 300 and 3,000&#x202F;Hz. In the post-processing of the signal (spike sorting), spikes are categorized as single units if the waveform can be clearly identified as action potentials of a single neuron. Otherwise, if spikes are overlapping, they are accumulated as linear combinations of action potentials from small neuron populations in the vicinity of the recording electrode, called multi-unit activity (<xref ref-type="bibr" rid="ref48">Rossant et al., 2016</xref>). In both cases, computational approaches implemented on the neural prosthesis are challenged to interpret the incoming data accurately. An additional requirement is that these approaches must operate on small-scale and low-powered hardware that is suitable for chronic implantation but provides limited computational resources.</p>
<p>To explore the capability of computational methods to comply with these constraints in a BMBI scenario, the rodent barrel cortex provides an advantageous experimental model (<xref ref-type="bibr" rid="ref18">Feldmeyer et al., 2013</xref>). Its prominent organization in cortical columns provides a one-to-one topographic representation of single whiskers, sensory organs on the animal&#x2019;s snout that can be mechanically stimulated in a well-controlled manner (<xref ref-type="bibr" rid="ref56">Staiger and Petersen, 2021</xref>). Whisker stimulation evokes a localized response in the barrel cortex that can be captured with MEAs and analyzed with the appropriate computational approaches like machine learning (ML) classification algorithms. Based on such recordings, it has been shown that ML classification algorithms, or classifiers, can accurately identify the cortical depth of the recording electrode and the deflection intensity of the whisker stimulus (<xref ref-type="bibr" rid="ref67">Wang et al., 2018a</xref>; <xref ref-type="bibr" rid="ref68">Wang et al., 2018b</xref>). While these authors showed that a pre-trained model can be applied for classification on a small microchip within a reasonable amount of time, they did not investigate if it is feasible to train ML models on such hardware, which would be required if a model needs recalibration due to small fluctuations in the positioning of the recording electrodes. However, retraining the model is more challenging in this context as it requires more computational resources than using a pre-trained model for inference, i.e., classifying brain activity. <xref ref-type="bibr" rid="ref39">Petschenig et al. (2022)</xref> addressed this challenge by benchmarking algorithms for classifying whisker deflection amplitudes based on MEA recordings from the barrel cortex of an anesthetized rat. 
Among the benchmarked methods, the Liquid State Machine (LSM) algorithm turned out to be highly accurate, and this is particularly relevant since it implements a Spiking Neural Network (SNN) developed explicitly for neuromorphic hardware. Hardware of this type comprises small, brain-inspired computing architectures designed for processing brain signals in a highly efficient manner, representing an appropriate option for chronically implanted prostheses (<xref ref-type="bibr" rid="ref8">Buccelli et al., 2019</xref>; <xref ref-type="bibr" rid="ref69">Werner et al., 2016</xref>).</p>
<p>The barrel cortex has, therefore, served as an ideal experimental model for investigating which computational approaches are appropriate for a neural prosthesis. The above-summarized developments in the classification of the whisker stimulus intensity based on stimulus-evoked activity from the barrel cortex are highly promising for the field of neuroprosthetics, as they demonstrate the ability to implement BMIs or BMBIs in low-powered hardware and their capability to decode information from neural activity. However, these results are based on recordings under anesthesia, which is known to affect the pattern of neural activity (<xref ref-type="bibr" rid="ref57">Steriade et al., 1993</xref>). Also, even in the awake state, cortical responses are known to differ depending on whether an animal is actively engaged in a sensory processing task or merely exposed to sensory stimulation without the need to react to it (<xref ref-type="bibr" rid="ref36">Otazu et al., 2009</xref>; <xref ref-type="bibr" rid="ref10">Carcea et al., 2017</xref>; <xref ref-type="bibr" rid="ref16">De Franceschi and Barkat, 2021</xref>). Thus, it remains unclear if ML classifiers also perform well on data recorded from awake and behaving animals. Such data is expected to contain both a higher level of and stronger variability in spontaneous activity compared to the anesthetized state, and therefore, it could be more challenging for the algorithms to detect or classify stimulus-evoked activity. Moreover, whether such models can be transferred from one individual to another is unknown. Such generalizable models would allow for training models on larger datasets composed of recordings from more than one individual, making the models more robust.</p>
<p>In this study, we benchmark lightweight ML algorithms on the task of whisker stimulus detection based on an electrophysiological dataset obtained from the barrel cortex of four mice while they performed a behavioral task. Using recordings from several sessions with different mice allowed us to clearly separate data from one individual for model training and then use this data for model evaluation, providing a clear picture of how these models generalize across individuals.</p>
<p>Relevant to the application of such models, we benchmark their runtime and investigate the impact of including data from multiple cortical layers. For the Liquid State Machine, the fastest and one of the best-performing algorithms, we analyzed how detection accuracy changes for detecting stimuli with different intensities and shorter MEA recordings.</p>
</sec>
<sec sec-type="results" id="sec2">
<title>Results</title>
<sec id="sec3">
<title>Dataset</title>
<p>The dataset was obtained from the barrel cortex of four head-fixed water-restricted mice while performing a go/no-go whisker stimulus detection task. Mice were trained to respond to whisker stimulation by licking at a water spout (<xref ref-type="fig" rid="fig1">Figure 1A</xref>). During experimental sessions, a silicon probe with two shanks, 32 electrodes each, was used to obtain electrophysiological recordings from cortical layers (L) 2/3, 4, 5, and 6. Based on a current-source-density (CSD) analysis, we identified different cortical layers and assigned all electrodes to their corresponding layer, selecting the electrode that was most central to derive a layer-specific signal from the MEA recordings (see Methods). Using the signals from different layers allowed us to investigate which layer provides the most important information for stimulus detection and whether integrating the signals from all layers leads to more accurate detection.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Experimental setup and psychometric curves. <bold>(A)</bold> Head-fixed mouse within reach of a water spout. Multi-electrode-array recordings are obtained from the cortical column associated with the stimulated whisker C1. A silicon probe with 64 electrodes, 32 on each of two shanks, was used to measure activity in all cortical layers, as shown in the sketch on the right-hand side panel, with barrels indicated in L4. For histological image examples, we refer to our previous experiments, in which shanks were only inserted after their location had been marked (<xref ref-type="bibr" rid="ref75">Yeganeh et al., 2022</xref>). <bold>(B)</bold> Overview of the behavioral task. Mice were conditioned to respond to whisker stimulation by licking at the water spout. Responding to stimulus trials (but not catch trials) within 500&#x202F;ms was rewarded with a drop of water. We refer to the time window from &#x2212;400&#x202F;ms to 0, relative to stimulus onset, as the pre-stimulus, or spontaneous, activity. The peri-stimulus or evoked activity is represented by the time window 0 to +100&#x202F;ms as the stimulus is a sinusoidal whisker vibration of 100&#x202F;ms duration. A hit trial, a stimulus trial with a successful mouse response, is defined as a lick within the window of opportunity (WOP) from +100 to +500&#x202F;ms. Trials with licks between 0 and +100&#x202F;ms were excluded as licking activity caused strong artifacts in the LFP. <bold>(C)</bold> Psychometric curve averaged across 19 sessions from 4 mice. Only trials that remain after all filtering criteria are applied are considered (see Methods). Whisker stimulus intensity ranged from 0% (no stimulus) over 20, 40, 60, and 80 to 100% intensity (percentage of maximum deflection amplitude). Trials without stimulus (0% intensity) are called catch trials and were included to estimate the response rates achieved by random licks. Licks within the WOP in catch trials are called false alarms. 
Error bars show the standard error of the mean (SEM) computed over sessions. Subpanels in A were modified from (<xref ref-type="bibr" rid="ref71">Yamashita et al., 2018</xref>).</p>
</caption>
<graphic xlink:href="fnins-20-1605209-g001.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Panel A presents a diagram of a rodent head with whisker C1 highlighted, indicating water reward and whisker stimulation, alongside a schematic of barrel cortex mapping and electrode placement in cortical columns, emphasizing the C1 barrel in layer IV. Panel B outlines an experimental timeline showing intertrial, silence, stimulus, and water reward periods with graphical traces for whisker stimulation, licks, and water reward. Panel C displays a line graph of probability of response versus stimulus intensity, comparing full session, first half, and second half, showing response rates increase with stimulus intensity.</alt-text>
</graphic>
</fig>
<p>In total, 19 experimental sessions were performed with 300 trials each. Each trial of the behavioral task consisted of different epochs (<xref ref-type="fig" rid="fig1">Figure 1B</xref>). Before whisker stimulation, the mice had to refrain from licking for at least 400&#x202F;ms (silence period). Since stimuli were only presented when animals refrained from licking for at least 0.4&#x2013;3&#x202F;s (varying randomly from trial to trial), we were able to use the 400&#x202F;ms before stimulus onset, called <italic>pre-stimulus</italic>, to characterize spontaneous activity in the barrel cortex. The stimulus itself consisted of a single-whisker 60-Hz sinusoidal vibration of 100&#x202F;ms duration, delivered in the rostrocaudal direction to the C1 whisker. We used this <italic>peri-stimulus</italic> time window to analyze stimulus-evoked activity. If the mouse responded within 500&#x202F;ms after stimulus onset, it was rewarded with a water droplet. Each recording session included 50 catch-trials without stimulation (0% intensity to assess spontaneous licking) and stimulus trials with different intensities ranging from 20 to 100% of maximum amplitude (corresponding to amplitudes between 93 and 463&#x202F;&#x03BC;m and angular velocities ranging from 324 to 1,600&#x202F;&#x00B0;/s at 2&#x202F;mm from the whisker base, respectively; see <xref ref-type="bibr" rid="ref65">Vandevelde et al., 2023</xref>, for more details). Stimuli in this range have been found to induce weak to moderately strong activity in barrel cortex and therefore span the range of barely to easily detectable stimuli (<xref ref-type="bibr" rid="ref72">Yang et al., 2016</xref>; <xref ref-type="bibr" rid="ref65">Vandevelde et al., 2023</xref>). Each stimulus was presented 50 times, and the stimulus sequence was randomized. 
The use of different intensities allowed us to investigate the response rate of mice during the behavioral experiment, as well as investigate the shape and strength of the evoked response in the barrel cortex. Using a randomized sequence was important to prevent mice from learning any patterns in whisker stimulation, which could potentially bias the response. The response rate increased monotonically with increasing stimulus intensity (<xref ref-type="fig" rid="fig1">Figure 1C</xref>), which is expected considering similar experiments (<xref ref-type="bibr" rid="ref58">St&#x00FC;ttgen et al., 2006</xref>; <xref ref-type="bibr" rid="ref59">St&#x00FC;ttgen and Schwarz, 2008</xref>; <xref ref-type="bibr" rid="ref60">St&#x00FC;ttgen and Schwarz, 2010</xref>).</p>
<p>For more details about the behavioral experiment, we refer to our previous publication (<xref ref-type="bibr" rid="ref65">Vandevelde et al., 2023</xref>).</p>
</sec>
<sec id="sec4">
<title>Setup of machine learning classifier benchmark</title>
<p>As we integrated data from different recording sessions, we could not use the spiking activity as each session provides a different set of single- and multi-units, which precludes the assembly of a common feature vector to describe trials across sessions, a prerequisite for the ML algorithms. Besides this, it has been demonstrated on a single-session dataset that (i) LFP features are as informative as features derived from multi-unit activity and (ii) it is not necessary to derive more sophisticated features from the LFP, such as the response peak amplitude or response onset latency, because using the raw signal provides sufficient information for the ML methods used (<xref ref-type="bibr" rid="ref39">Petschenig et al., 2022</xref>). In general, the LFP recording is more stable in comparison to action potentials, and therefore, it is recommended to use the LFP in the context of neuroprosthetics (<xref ref-type="bibr" rid="ref1">Andersen et al., 2004</xref>; <xref ref-type="bibr" rid="ref31">Markowitz et al., 2011</xref>). Following these previous studies, we use the raw LFP traces at 1,000&#x202F;Hz resolution, low-pass filtered at 150&#x202F;Hz but without further feature engineering, referring to them as <italic>RAW</italic> features, hereafter. Additionally, we investigate <italic>FFT</italic> features, consisting of the frequency components we obtain by applying the Fast Fourier Transform to the filtered signal (<xref ref-type="bibr" rid="ref7">Brigham, 1988</xref>). Including the FFT features was inspired by <xref ref-type="bibr" rid="ref51">Sederberg et al. (2019)</xref>, who demonstrated that field potentials in the barrel cortex can be informative with respect to how whisker stimuli are perceived in the barrel cortex of awake mice.</p>
<p>We chose to use the LFP for two purposes: first, stimulus detection (SD), i.e., to decode whether a stimulus was presented to the animal, and second, response prediction (RP), i.e., to predict whether the animal was responding to the whisker stimulus. To that end, we benchmarked six ML classification algorithms, also called <italic>classifiers</italic>, known for good performance in various applications (see <xref ref-type="table" rid="tab1">Table 1</xref> for an overview of the selected classifiers). The Decision Tree (DT) achieves short runtimes due to its simplicity (<xref ref-type="bibr" rid="ref28">Loh, 2011</xref>). Compared to a single decision tree, Random Forest (RF), which makes use of an ensemble of decision trees, and XGBoost (XGB), which uses stacked decision trees, are known to provide more accurate models while maintaining low runtime requirements (<xref ref-type="bibr" rid="ref6">Breiman, 2001</xref>; <xref ref-type="bibr" rid="ref12">Chen and Guestrin, 2016</xref>). Similar characteristics are associated with generalized linear models (GLM) that are, once trained, very fast during inference as they rely on a linear combination of weighted variables, so the logistic regression, a GLM with underlying logit function, has been included in the benchmark (<xref ref-type="bibr" rid="ref25">Kleinbaum and Klein, 2010</xref>). Interestingly, comparable studies investigating brain activity classification did not explore algorithms developed for time series classification. We, therefore, included ROCKET (RCKT), a classification algorithm dedicated to time series input (<xref ref-type="bibr" rid="ref17">Dempster et al., 2020</xref>; <xref ref-type="bibr" rid="ref37">Pantiskas et al., 2022</xref>). As mentioned above, spiking neural networks are promising due to their ability to run efficiently on low-powered neuromorphic hardware, which motivated the inclusion of the Liquid State Machine (LSM). 
More complex artificial neural network architectures, such as long short-term memory (LSTM), have not been explored due to their high computational requirements and because they did not provide better models in comparable studies (<xref ref-type="bibr" rid="ref39">Petschenig et al., 2022</xref>).</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Overview of ML classifiers used in the benchmark.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th>Algorithm</th>
<th align="left" valign="top">Runtime requirements</th>
<th align="center" valign="top">Developed for time series</th>
<th align="left" valign="top">Application examples related to neuroprosthetics</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">DT</td>
<td align="left" valign="top">Low</td>
<td/>
<td align="left" valign="top"><xref ref-type="bibr" rid="ref55">Srimaharaj and Chaisricharoen (2021)</xref> and <xref ref-type="bibr" rid="ref14">Chikkudu and Annamalai (2024)</xref></td>
</tr>
<tr>
<td align="left" valign="top">RF</td>
<td align="left" valign="top">High</td>
<td/>
<td align="left" valign="top"><xref ref-type="bibr" rid="ref4">Bhadra et al. (2025)</xref> and <xref ref-type="bibr" rid="ref20">Giri et al. (2024)</xref></td>
</tr>
<tr>
<td align="left" valign="top">XGB</td>
<td align="left" valign="top">Medium</td>
<td/>
<td align="left" valign="top"><xref ref-type="bibr" rid="ref64">Tiwari and Chaturvedi (2019)</xref> and <xref ref-type="bibr" rid="ref24">Khanday et al. (2024)</xref></td>
</tr>
<tr>
<td align="left" valign="top">RCKT</td>
<td align="left" valign="top">Medium</td>
<td align="center" valign="top">&#x2713;</td>
<td align="left" valign="top">
<xref ref-type="bibr" rid="ref33">Menon et al. (2023)</xref>
</td>
</tr>
<tr>
<td align="left" valign="top">GLM</td>
<td align="left" valign="top">Low</td>
<td/>
<td align="left" valign="top"><xref ref-type="bibr" rid="ref20">Giri et al. (2024)</xref> and <xref ref-type="bibr" rid="ref14">Chikkudu and Annamalai (2024)</xref></td>
</tr>
<tr>
<td align="left" valign="top">LSM</td>
<td align="left" valign="top">Low</td>
<td/>
<td align="left" valign="top"><xref ref-type="bibr" rid="ref39">Petschenig et al. (2022)</xref> and <xref ref-type="bibr" rid="ref3">Behrenbeck et al. (2019)</xref></td>
</tr>
</tbody>
</table>
</table-wrap>
<p>All algorithms were compared based on different classification scenarios. The term <italic>classification scenario</italic> is here used to describe a specific combination of which classification task (SD or RP) is considered and which features are used, further specifying the scenario by type (RAW, FFT), cortical layer, and time window from which the recording was derived (<xref ref-type="fig" rid="fig2">Figure 2A</xref>). For each algorithm, hyperparameter tuning was applied by evaluating a grid of parameter settings, also called grid search, using 3-fold cross-validation on the training data (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Table T1</xref> for details). Furthermore, the model evaluation was carefully done, splitting up experimental trials based on individual mice to ensure cross-individual validation in a way that trials from one individual are separated between the training set, used for hyperparameter tuning and training the classification model, and the testing set, used to evaluate the model performance (<xref ref-type="fig" rid="fig2">Figure 2B</xref>). Training&#x2013;testing splits have been repeated four times, using four individuals and for each split, 10 bootstraps were drawn to increase the reliability of the aggregated accuracy measures. For each bootstrapping, the size of the training and testing set differs because the number of experimental sessions available differs between mice (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Table T2</xref> for details about the set sizes).</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>Machine learning features and setup of the benchmark. <bold>(A)</bold> Data flow for the machine learning classification scenarios. Four electrodes were selected to derive the LFP for different cortical layers. The time windows, PRE-stimulus, PERI-stimulus, or both (FULL), and the feature type specified the ML features. The classification label specifies the task of the algorithms, which was either response prediction (RP) or stimulus detection (SD), the latter being the focus of this study. Using different combinations of layer, time window, feature type, and classification label results in 24 different classification scenarios. <bold>(B)</bold> Essential for the benchmark was splitting experimental trials into training and testing sets. The training set was used to explore the hyperparameter settings of the machine learning algorithms. Given one algorithm, multiple hyperparameter settings were evaluated by a grid search using a three-fold cross-validation. The setting providing the best results in the grid search was used to train a model based on the full training set and eventually validated using the testing set. The testing set was created using data from only one mouse to ensure that trials from the same individual or experimental session were not included in both training and testing sets. This was repeated for four mice. Given one training&#x2013;testing split, 10 bootstrap samples were drawn from the training set to train and evaluate multiple models. Given one classification scenario and one algorithm, this results in 40 model evaluations, increasing the statistical power of aggregated measurements for model accuracy. Feature values for RAW and FFT were linearly scaled using Min-Max Scaling (scaling values to the range [0, 1]). The minimum and maximum were computed from the training set only, then used to scale feature values in the training and testing sets.</p>
</caption>
<graphic xlink:href="fnins-20-1605209-g002.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Panel A shows a flowchart for neural data analysis with 32 electrodes across cortical layers L2/3 to L6, selecting data from a single or all layers and different time windows (PRE, PERI, FULL), extracting features (RAW, FFT), and classification labels (RP, SD). Panel B illustrates an experimental design with mice, describing how the training set is bootstrapped and used for parameter tuning via three-fold cross-validation, with the model evaluated on a testing set after it has been retrained on the training set using the best parameter setting.</alt-text>
</graphic>
</fig>
<p>In all classification scenarios, we used perfectly balanced datasets, meaning each subset contained the same number of positive trials (stimulus trial or response trial) and negative trials (catch trial or no-response). This was achieved by down-sampling trials from the overrepresented class, and due to the expected 50% positives in the data, we can assume that the baseline accuracy achieved by random predictions converges to 50%. Note that including some scenarios was mainly motivated by creating a baseline scenario in which a low accuracy (close to 50%) is expected. For instance, we would not expect pre-stimulus LFPs to encode information useful for stimulus detection. Hence, an accuracy of 50% would be expected for this scenario from any classifier. For response prediction, the expectation can be slightly different. Even though the barrel cortex is not involved in decision-making, its spontaneous activity might encode different states related to attention and excitement, which is relevant to the behavioral experiment (<xref ref-type="bibr" rid="ref15">Cowley et al., 2020</xref>). Hence, it might be possible for classifiers to leverage weak patterns predictive for the response, even from pre-stimulus (spontaneous) activity.</p>
</sec>
<sec id="sec5">
<title>Machine learning classifier benchmark</title>
<p>The first analysis addresses the question of the extent to which ML classification algorithms, also called <italic>classifiers</italic>, can leverage information from LFP features to train accurate classification models for response prediction (RP) or stimulus detection (SD) and which classifier trains the most accurate models.</p>
<p><xref ref-type="fig" rid="fig3">Figure 3</xref> provides an overview of the accuracy obtained for all classifiers (innermost <italic>y</italic>-axis) applied to the classification scenarios, specified by the two classification tasks (SD, RP), the time window for the recording (PRE, PERI, FULL), feature type (RAW, FFT), cortical layers (innermost <italic>x</italic>-axis), and the stimulus intensities used for trial selection (see subtitles). Brighter colors represent higher accuracy observed for SD in general. Using the PERI and FULL time window results in higher accuracy for both tasks, SD and RP. Regarding the feature types, the RAW features are more informative than the FFT, shown by higher accuracy for both SD and RP. The lower half of the heatmap shows accuracy measures obtained for SD, for which a higher level of accuracy can be observed in general. The accuracy for SD reached up to 90% when trials with 0% (catch-trials) and maximum stimulus intensity of 100% were contrasted.</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption>
<p>Machine learning benchmark. Accuracy represents the classification performance of six different algorithms in several supervised classification scenarios, mainly defined by the task of stimulus detection (SD) and response prediction (RP). These are specified by the time window before (PRE) or after (PERI) stimulus onset or the complete time window (FULL), concatenating the PRE- and PERI-stimulus LFP signal. Furthermore, there are different feature types, RAW and FFT, which were derived from the LFP measured in different cortical layers (L2/3, L4, L5, L6) or using a concatenation of feature vectors from all layers (ALL). The underlying datasets contain either trials from all intensities or only catch trials (0%) and full intensity (100%) trials. The asterisks indicate which classifier achieved the best accuracy.</p>
</caption>
<graphic xlink:href="fnins-20-1605209-g003.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Heatmap showing accuracy for two conditions, &#x201C;ALL Intensities&#x201D; and &#x201C;Only 0% and 100%,&#x201D; comparing models and features for response prediction and stimulus detection tasks. Color represents accuracy from 0.50 to 0.90, with higher values brighter. Asterisks mark the highest accuracy achieved for the layer-specific features. Models appear on the y-axis, features and cortical layers on the x-axes, and a color bar indicates the accuracy scale.</alt-text>
</graphic>
</fig>
<p>Features derived from L4 are more informative than those derived from other layers. Considering a feature vector combining all layers (ALL on the <italic>x</italic>-axis) slightly improves the accuracy over the performance of L4 alone. As expected, the peri-stimulus activity (PERI), describing the stimulus-evoked response in the LFP, is more informative for both classification tasks in comparison to the pre-stimulus activity. Classification models trained on pre-stimulus activity features are unable to perform accurately for SD, as expected. For RP, however, the accuracy slightly increased over the 50% baseline when RF is used on pre-stimulus FFT features (<xref ref-type="fig" rid="fig3">Figure 3</xref>).</p>
<p>Regarding the classifiers, RF performs well in scenarios exploring FFT features. For the RAW features that are more informative for the SD task, we observed that the GLM and LSM achieved the highest performance, with RCKT slightly lower. We do not see an improvement in model performance for SD when integrating the FULL time window over using peri-stimulus alone (<xref ref-type="fig" rid="fig3">Figure 3</xref>).</p>
<p>However, from the median accuracies discussed in this section, it is not clear if it is worth integrating all layers, expected to increase model training time, over using solely L4. Moreover, a runtime analysis is required for the best algorithms, considering the practical application of these models. Therefore, a more detailed comparison of the most accurate algorithms follows, focusing on SD based on the peri-stimulus RAW features.</p>
</sec>
<sec id="sec6">
<title>Detailed comparison of the most accurate algorithms</title>
<p>For this comparison of the best-performing classifiers, the SD classification task has been selected with 0 and 100% intensity trials only, because the highest classification accuracy was achieved in this scenario. Considering GLM, RCKT, and LSM as the top-performing classifiers in terms of accuracy, the LSM is the strongest competitor based on the runtime comparison (<xref ref-type="fig" rid="fig4">Figure 4A</xref>). RCKT was faster than the GLM for model training but much slower during inference. In general, the runtime for model training and inference was higher when all layers were used, which is expected as algorithms are processing four times more ML features than using only a single layer. Focusing on LSM and GLM as the fastest algorithms, we investigated the difference in accuracy for the SD task using all layers and only L4 (<xref ref-type="fig" rid="fig4">Figure 4B</xref>). In this comparison, the LSM achieved the lowest performance when all layers were integrated, which suggests that the GLM is more efficient in dealing with high-dimensional data as it achieved the highest median accuracy overall when it integrated all layers. However, as the accuracy achieved by the GLM with all layers is not significantly higher than that of the LSM with only L4, we conclude that the LSM is the best choice for stimulus detection, especially due to its superior performance in the runtime comparison with the lowest model training and inference runtimes using only L4. Furthermore, it achieved slightly more robust accuracy distributions across all individuals than the GLM, for which more outliers and lower median accuracy measures were observed overall (<xref ref-type="fig" rid="fig4">Figure 4C</xref>). To assess whether the accuracy scores of the GLM and LSM (<xref ref-type="fig" rid="fig4">Figure 4B</xref>) differed significantly, the Wilcoxon signed-rank test was applied on 40 pairs (four mice times ten bootstraps). 
A pair of values consists of the accuracy values from the two classifiers, obtained on the same mouse-specific testing set and the same bootstrap sample, using random seeds to ensure bootstrap samples are equal.</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption>
<p>More detailed comparison of the most accurate and fastest algorithms. The scenario selected for these detailed visualizations is the SD using RAW features based on 0 and 100% intensity trials only. <bold>(A)</bold> The computational time was measured during model training and when a trained model was applied to samples from the testing set to classify those (inference). As training and testing sets differ in size depending on which mouse was used to produce the split, the runtime, measured in seconds, was normalized using the size of the training and testing sets, respectively. Note that the range of the <italic>y</italic>-axis differs between the subplots and that the <italic>y</italic>-axis is on a logarithmic scale. <bold>(B)</bold> Comparison of the accuracy distribution achieved by GLM and LSM using the raw LFP (see RAW in <xref ref-type="fig" rid="fig3">Figure 3</xref>) derived from layer L4 or concatenating all layers. The asterisks indicate significant differences with &#x002A; &#x2192;<italic>p</italic> value&#x202F;&#x003C;&#x202F;0.05 and &#x002A;&#x002A; &#x2192;<italic>p</italic> value&#x202F;&#x003C;&#x202F;0.01 using the Wilcoxon signed-rank test. Distributions are not significantly different if nothing is indicated. <bold>(C)</bold> Classification performance of GLM and LSM for the different training&#x2013;testing splits based on four mice. Boxplots describe the distribution of accuracies measured on 10 bootstrap samples. Diamonds show the outliers of the boxplot.</p>
</caption>
<graphic xlink:href="fnins-20-1605209-g004.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Figure with three box plot panels comparing performance metrics of algorithms RCKT (orange), GLM (purple), and LSM (blue). Panel A shows computational cost for model training and inference across &#x201C;only Layer 4&#x201D; and &#x201C;ALL Layers,&#x201D; with GLM generally having higher training costs and LSM lower inference costs. Panel B shows accuracy for GLM and LSM across &#x201C;only Layer 4&#x201D; and &#x201C;ALL Layers,&#x201D; indicating higher accuracy in &#x201C;ALL Layers&#x201D; and with significant differences noted by asterisks. Panel C shows accuracy per mouse (M1 to M4) for GLM and LSM algorithms, with similar trends between models but some variation across mice.</alt-text>
</graphic>
</fig>
</sec>
<sec id="sec7">
<title>Stimulus detection for different intensities using shorter peri-stimulus LFP signals</title>
<p>For practical applications, it is important to know how accurate the detection is for stimuli of different intensities (<xref ref-type="bibr" rid="ref39">Petschenig et al., 2022</xref>; <xref ref-type="bibr" rid="ref68">Wang et al., 2018b</xref>). Furthermore, it is crucial to investigate how long the LFP recordings need to be to maintain accuracy at the highest level possible. The shorter this recording time is, the earlier the model can be applied after stimulus onset. To address these two aspects, an additional SD experiment was performed using subsets of the data with catch-trials and stimulus trials of intensities from 20 to 100%. Note that this includes the results from the benchmark obtained for 100% stimulus intensity and the complete 100&#x202F;ms of peri-stimulus activity. These SD experiments were further specified by different lengths of the RAW LFP signal as input for the classification model, using L4 as the most informative layer. All additional classification experiments were done for the LSM using the validation strategy applied during the initial ML benchmark analysis (<xref ref-type="fig" rid="fig2">Figure 2B</xref>).</p>
<p>We observed that LSM&#x2019;s detection accuracy is lower for stimuli of low intensities (<xref ref-type="fig" rid="fig5">Figure 5</xref>). In comparison to the accuracy achieved on the complete (100&#x202F;ms) peri-stimulus LFP, approximately 35&#x2013;40&#x202F;ms is required to achieve a high level of accuracy for the higher intensities greater than or equal to 60%. For 40% intensity, even though the overall accuracy was much lower than for higher intensities, the LFP can be as short as 15&#x202F;ms to achieve high accuracy for this case. Note that the RAW features were used for these tests, representing the LFP after low-pass and band-stop filtering, which further delays this process by a total of 0.2&#x202F;ms (measured on a common CPU, expected to be slightly higher on neuromorphic hardware).</p>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption>
<p>Accuracy of stimulus detection for different intensities using shorter peri-stimulus LFP signals. Accuracy for stimulus detection when the LSM is trained and evaluated to differentiate catch trials (0% intensity) from stimulus trials of different intensities (20&#x2013;100%). Each classification experiment was repeated using shorter LFP recordings derived from the peri-stimulus signal, as shown on the <italic>x</italic>-axis. The lines describe the mean accuracy based on 40 training&#x2013;testing splits, and the error bars describe the standard error of the mean.</p>
</caption>
<graphic xlink:href="fnins-20-1605209-g005.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Line graph showing accuracy for stimulus detection on the y-axis and length in milliseconds of peri-stimulus LFP on the x-axis, with five lines representing stimulus intensities from 20 percent to 100 percent. Accuracy increases steeply with both length and intensity, plateauing after 15 milliseconds, with highest accuracy for 100 percent intensity and lowest for 20 percent intensity.</alt-text>
</graphic>
</fig>
</sec>
<sec id="sec8">
<title>Imbalanced data and model calibration</title>
<p>To mitigate potential impacts of class imbalance (over- or underrepresentation of the positive class), datasets have been downsampled to train models on data with 50% positive (stimulus present) and 50% negative (stimulus absent) samples. For deployment, however, it is important to investigate how algorithms perform on imbalanced data. Using L4 as the most informative layer and RAW as the most informative feature type for stimulus detection, we benchmarked the GLM and LSM again and added comparisons on data without downsampling. We used the dataset with all stimulus intensities, resulting in 83% stimulus trials, and the dataset containing only 0% stimulus intensity (catch) trials and 100% stimulus intensity trials, for which downsampling had almost no impact, as the fraction of positives changed from 50 to 49.3%. Due to the class imbalance in the data, we added ROC and Precision-Recall curves to evaluate model performance (<xref ref-type="bibr" rid="ref49">Saito and Rehmsmeier, 2015</xref>). Additionally, we used the Brier loss score to evaluate model calibration and studied the impact of Platt Calibration on the model probabilities from GLM and LSM (<xref ref-type="bibr" rid="ref41">Platt, 1999</xref>; <xref ref-type="bibr" rid="ref34">Niculescu-Mizil and Caruana, 2005</xref>).</p>
<p>The results from this analysis (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure S1</xref>) show that (i) the GLM is well-calibrated and does not benefit from Platt Calibration, (ii) the LSM needs Platt Calibration to improve its calibration performance, especially for imbalanced data, and (iii) the overall performance of GLM and LSM is very similar, both achieving area under ROC and Precision-Recall curve clearly better than a random guess or na&#x00EF;ve model.</p>
<p>Note that testing data is not used for Platt Calibration. A default Logistic Regression (scikit-learn) is trained as a calibration model on the classification probabilities and class labels of a 33% hold-out dataset from the training set. Consequently, the model evaluated on the mouse-specific testing set has been fitted on only 67% of the training set when using Platt Calibration. The fitted calibration model is applied to transform the classification probabilities from the testing set before computing the evaluation metrics.</p>
</sec>
</sec>
<sec sec-type="discussion" id="sec9">
<title>Discussion</title>
<p>Three ML classification algorithms were identified as the best-performing ones, achieving the highest accuracy for stimulus detection. In a subsequent runtime analysis, the LSM has been superior in achieving the lowest runtimes for model training and inference. Hence, the LSM has not only been faster within the runtime analysis performed on an ordinary CPU but it is also expected to be more efficient when deployed on neuromorphic hardware as it, in contrast to other machine learning algorithms, mirrors the physical architecture of low-powered chips used in such hardware. Accordingly, the LSM could even be used to train a model on the prosthesis, which is essential for recalibrating a model by retraining after it has been implanted (<xref ref-type="bibr" rid="ref39">Petschenig et al., 2022</xref>). Recalibration of a model can be necessary if the electrodes measuring the input signal are slightly moved and small shifts in the signal are expected.</p>
<p>Furthermore, the results of this study show that the classification models are robust across four mice from which the data was derived, with the LSM being slightly more robust than the GLM. This is promising as models could potentially be trained on data collected from a large cohort of patients and then deployed in new patients. When the model is used for classification on low-powered hardware, the low inference times are paramount. While the LSM and GLM were much faster when applying the model (inference) than training it, the ROCKET classifier showed similar runtime requirements during training and inference. This is explained by ROCKET&#x2019;s algorithmic strategy of applying convolutional kernels to the time series signal before using a sample as input. This signal preprocessing is required during training and when a trained model is used. Hence, this requires additional computational resources during inference.</p>
<p>The runtime analysis also showed that all algorithms require less computational resources when only one layer is used. Even though the GLM achieved the highest stimulus detection accuracy when the signal of all layers was integrated, its accuracy was not significantly higher than that of the LSM when applied with only L4. Observing L4 as the most informative layer within the barrel cortex is not unexpected, as it is the main recipient of the thalamic input about whisker deflection, and multi-unit activity in this layer features higher neurometric detection sensitivity than in other layers (<xref ref-type="bibr" rid="ref65">Vandevelde et al., 2023</xref>; <xref ref-type="bibr" rid="ref44">Reyes-Puerta et al., 2015</xref>; <xref ref-type="bibr" rid="ref43">Reyes-Puerta et al., 2015</xref>). The signal is expected to become noisier as it is forwarded to other layers of barrel cortex, which also receive information from other cortical areas, and may integrate this information with texture information provided by consecutive whisker-object contacts for routing to action-related cortical areas (<xref ref-type="bibr" rid="ref76">Zuo and Diamond, 2019</xref>; <xref ref-type="bibr" rid="ref19">G&#x0103;m&#x0103;nu&#x0163; et al., 2018</xref>). The redundancy of whisker information represented in different cortical layers, along with (apparent) noise added from other brain areas, could explain why the LFP in layers other than L4 does not strongly enhance stimulus detection (<xref ref-type="bibr" rid="ref2">Arabzadeh et al., 2006</xref>). 
Additionally, using a concatenation of feature vectors from all layers results in high-dimensional data, characterized by a high number of features but a low sample size, which can lead to less accurate stimulus detection models, as ML algorithms are less effective in capturing meaningful patterns in the large number of features (<xref ref-type="bibr" rid="ref22">Horenko, 2020</xref>; <xref ref-type="bibr" rid="ref66">Vecchi et al., 2022</xref>). We conclude that layers other than L4 can provide additional information to slightly improve the SD accuracy, as shown by the GLM, but that these improvements are too small to justify choosing the GLM over the LSM, which is much faster and only slightly less accurate when using solely L4.</p>
<p>Besides the runtime of the machine learning model itself, it is also important to know how long the LFP signal needs to be for accurate stimulus detection. We, therefore, analyzed the classification performance for stimulus detection using shorter RAW feature vectors and found that high accuracy, as observed for 100&#x202F;ms of the peri-stimulus signal, can be maintained using only 35&#x2013;40&#x202F;ms of the signal. For practical applications, this means that a delay by this time frame should be expected before a stimulus can be detected and processed in a BMI or BMBI device. This analysis has also been done for different intensities and reveals that accuracy decreases with decreasing intensity. This is a critical observation, indicating that only strong stimuli are detected reliably. At the same time, it reflects the perception of mice as they also showed a lower response rate for weaker stimuli during the behavioral experiment (<xref ref-type="fig" rid="fig1">Figure 1C</xref>), and because generally, whisker stimulus intensities were relatively low compared to the full range of possible contacts during unrestrained locomotion (<xref ref-type="bibr" rid="ref47">Ritt et al., 2008</xref>).</p>
<p>Generally, an LFP signal describing the evoked response of a whisker stimulus in the barrel cortex is characterized by a strong drop in the current approximately 5&#x2013;10&#x202F;ms post-stimulus onset (see example sessions in <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure S2</xref>). This change in the signal seems to be strong and consistent enough that a classifier does not need to leverage complex, non-linear patterns, which is implied by the strong performance of the linear model (GLM) used in the benchmark. This might also explain why the RAW LFP signal is more informative than oscillatory patterns encoded by the FFT features. Comparing the LFP and FFT features of two sessions from our dataset revealed that an increased amplitude for the frequency components describing 60&#x202F;Hz and its harmonics can exist (<xref ref-type="supplementary-material" rid="SM1">Supplementary Figure S2</xref>). However, these FFT patterns are inconsistent and only present if the electrode has been close to a cluster of velocity-sensitive neurons activated by the peak velocity for the whisker movement during stimulation. Remember that the vibrotactile stimulus was a cosine of 60&#x202F;Hz, moving the whisker in the anterior&#x2013;posterior direction. Importantly, in the primary afferent neurons of the trigeminal ganglion (the first station of the ascending whisker sensory pathway), activity in response to whisker deflection is strongly determined by stimulus velocity (<xref ref-type="bibr" rid="ref58">St&#x00FC;ttgen et al., 2006</xref>). This is also true for whisker-responsive neurons in the thalamus (<xref ref-type="bibr" rid="ref63">Temereanca and Simons, 2003</xref>) and barrel cortex (<xref ref-type="bibr" rid="ref40">Pinto et al., 2000</xref>).</p>
<sec id="sec10">
<title>Response prediction and the patterns in spontaneous activity</title>
<p>Besides investigating neural activity with respect to how whisker stimuli are processed in the barrel cortex, the dataset allowed us to search for patterns predictive of the behavioral response. This is a critical aspect as we record from a somatosensory brain region believed to be involved in the process of evaluating an incoming stimulus and to provide the basis for sensory-driven decision-making (<xref ref-type="bibr" rid="ref56">Staiger and Petersen, 2021</xref>). Considering the response prediction analysis as part of the ML benchmark, we observe a moderate accuracy for response prediction models trained on the RAW features from the peri-stimulus or full signal (<xref ref-type="fig" rid="fig3">Figure 3</xref>). However, an additional analysis based on the predictions of the corresponding models strongly suggests that the response prediction models actually perform stimulus detection, which results in quite accurate models due to the strong correlation between the presence and absence of stimulus and the occurrence of behavioral response (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Figure S3</xref>).</p>
<p>Interestingly, our benchmark revealed that patterns exist in the harmonic oscillations derived from spontaneous (pre-stimulus) activity that are slightly predictive toward the response of mice (see RP with pre-stimulus FFT features, all intensities, <xref ref-type="fig" rid="fig3">Figure 3</xref>). For the FFT features derived from L2/3, L4 or L5, we observed accuracy slightly, but significantly better than the random baseline when all stimulus intensities are considered in the data (<italic>p</italic>-value from Fisher&#x2019;s test &#x003C; 1e<sup>&#x2212;20</sup> in all three cases). Even though this difference in accuracy is small, these results indicate that neuronal activity in the barrel cortex is also related to attention or anticipation of the expected whisker stimulus (<xref ref-type="bibr" rid="ref27">Lee and Dan, 2012</xref>; <xref ref-type="bibr" rid="ref61">St&#x00FC;ttgen and Schwarz, 2018</xref>). Such weak patterns in the spontaneous activity could reflect brain states in which the mouse is more attentive to perceive the stimulus, which impacts the response, especially when not only the full-intensity trials but also those with a small or intermediate intensity are analyzed. While these are indications relevant to the role of the barrel cortex in the behavioral response of mice, future work is required to further investigate the FFT features, which is out of scope for this study and might require further behavioral experiments.</p>
</sec>
<sec id="sec11">
<title>Practical application and limitations</title>
<p>Previous <italic>in vivo</italic> recordings in the rat barrel cortex have identified inhibitory interneurons in L4 as the units carrying the highest amount of sensory stimulus-related information (<xref ref-type="bibr" rid="ref44">Reyes-Puerta et al., 2015</xref>; <xref ref-type="bibr" rid="ref43">Reyes-Puerta et al., 2015</xref>). Thus, neuronal activity in L4 represents a good target to obtain information on the spatio-temporal properties of the sensory input. Measuring the activity from L4 is not only possible but also very reliable with invasive technologies as used in our experimental model. However, in a practical application, it will be more beneficial to use non-invasive technologies to avoid an intervention in the patient&#x2019;s brain. Resolving activity in a specific portion of tissue without having direct access could be realized by laminar inference based on MEG recordings, although MEG is not applicable in neuroprosthetics (<xref ref-type="bibr" rid="ref5">Bonaiuto et al., 2018</xref>).</p>
<p>Our study confirms that ML models can be highly accurate on whisker stimulus classification, even for LFP signals recorded from awake mice. However, the datasets used for such explorations, including the one we used, are based on precisely controlled single-whisker stimuli, certainly different from how whiskers bend and move when animals are actively palpating during unrestrained exploration (<xref ref-type="bibr" rid="ref39">Petschenig et al., 2022</xref>; <xref ref-type="bibr" rid="ref67">Wang et al., 2018a</xref>; <xref ref-type="bibr" rid="ref68">Wang et al., 2018b</xref>). Therefore, for future work, it would be relevant to apply and test the suggested algorithms on recordings from microchips implanted in freely moving animals. The hardware required to do this could use wireless and battery-free devices (<xref ref-type="bibr" rid="ref32">Martinez et al., 2018</xref>; <xref ref-type="bibr" rid="ref70">Won et al., 2023</xref>). Recordings from such experiments might result in LFP traces that are less distinguishable, as we would expect that whisker deflections from freely moving animals are less distinct.</p>
<p>Additionally, our benchmark for imbalanced data covers only the case of overrepresentation of the positive class, which is the opposite of a real-life scenario in which stimulus events are less frequent compared to non-stimulus recordings. Our dataset provided well-defined trials for events with an induced stimulus, with five different intensities, as well as catch trials without a stimulus. Thus, using all trials of the dataset without downsampling results in overrepresentation of the positive class. Nevertheless, this analysis demonstrates that both algorithms perform well on imbalanced data in general and reveals that additional calibration of classification probabilities is required when using the LSM, particularly on imbalanced data. Interestingly, on the imbalanced data, the GLM and LSM achieved accuracy values close to those expected from a na&#x00EF;ve model that always returns the positive class (stimulus present). However, both algorithms yielded high area-under-the-curve scores when using the ROC and Precision-Recall curves as threshold-independent evaluation metrics. We therefore conclude that, in imbalanced deployment settings, both algorithms require decision threshold optimization, which is left for future work.</p>
<p>To realize the closed-loop system, the output of the ML classifier could be used to control an electrode located in another brain region to induce intracortical microstimulation that can convey artificial signals that may be as complex as the somatic perception of tactile properties from different objects (<xref ref-type="bibr" rid="ref23">Jackson et al., 2006</xref>; <xref ref-type="bibr" rid="ref35">O&#x2019;Doherty et al., 2011</xref>). However, a complete experimental setup would be required to verify whether the whole process of reading in the MEA recording, applying the ML classifier, and creating a feedback signal is fast enough to restore brain functionality, establishing timely signal processing with other cortical areas.</p>
</sec>
<sec id="sec12">
<title>Model robustness and generalizability</title>
<p>Another important aspect is the robustness of the findings concluded from our ML-based analyses. The sample size is a key factor in statistical analyses, and machine learning models tend to be more meaningful and generalizable when trained on large datasets (<xref ref-type="bibr" rid="ref11">Carlini and Wagner, 2017</xref>; <xref ref-type="bibr" rid="ref46">Rice et al., 2020</xref>). Even though electrophysiological recordings are challenging from different perspectives, we could explore a comparatively large dataset using &#x003E;3,400 trials for six stimulus intensities. The final subsets used for the ML-based analysis were restricted by a selection of trials for the different intensities, resulting in a sample size of at least 600 trials (see <xref ref-type="supplementary-material" rid="SM1">Supplementary Table T2</xref>), which is approximately three times higher than in comparable studies using only one recording session, e.g., (<xref ref-type="bibr" rid="ref39">Petschenig et al., 2022</xref>; <xref ref-type="bibr" rid="ref67">Wang et al., 2018a</xref>; <xref ref-type="bibr" rid="ref68">Wang et al., 2018b</xref>).</p>
<p>In summary, based on a large electrophysiological dataset enabling a robust model evaluation, we demonstrated that SNNs are accurate and efficient in interpreting neural activity toward neuroprosthetics applications related to the barrel cortex. Using the LSM classifier, model training could be realized on a small microchip to be implemented on the prosthesis, which can be beneficial to retrain a model for adjusting to small drifts of the implanted electrode, for instance. While our tests were done on an infrastructure with common CPUs with the main perspective to compare the model training time between the different classifiers, it has been shown recently that LSMs can efficiently be implemented and executed on microchips, as demonstrated on a DYNAP-SE neuromorphic processor (<xref ref-type="bibr" rid="ref39">Petschenig et al., 2022</xref>). Another opportunity is to train a model on an external machine, transferring the model and only applying it on the prosthesis (<xref ref-type="bibr" rid="ref68">Wang et al., 2018b</xref>). If future technologies enabled a more stable implantation of the electrodes, retraining on the prosthesis might not be necessary. Our generalizability analysis confirms that using a pre-trained model is possible even if the underlying data was derived from an individual different from the patient who needs the prosthesis.</p>
</sec>
</sec>
<sec sec-type="methods" id="sec13">
<title>Methods</title>
<sec id="sec14">
<title>Electrophysiological dataset</title>
<p>The data investigated for this study were obtained from experiments with head-fixed mice performing a go/no-go whisker stimulus detection task (<xref ref-type="bibr" rid="ref65">Vandevelde et al., 2023</xref>). Mice were water-restricted and learned that they were rewarded by a drop of water if licking within 500&#x202F;ms after the onset of a 100-ms sinusoidal vibration of a single whisker. Before mice were trained to perform this task, they were habituated to the head-fixed setting, which was important to enable the recording of neural activity in the cortical column in the barrel cortex associated with the stimulated whisker. This was done with multi-electrode-array (MEA) silicon probes through all cortical layers using 2-shank-64-channel probes with a distance between the shanks of either 200 or 250&#x202F;&#x03BC;m (Neuro Nexus, Ann Arbor, United States, or Cambridge Neurotech, Cambridge, United Kingdom, respectively). Each shank has 32 electrodes, and the distance between electrodes is 25&#x202F;&#x03BC;m, which enables the observation of neural activity over an extent of ~0.8&#x202F;mm.</p>
<p>For more details about the mouse line, behavioral task training, habituation, surgery, MEA recordings, and the layer-electrode association using current source density (CSD) plots, we refer to <xref ref-type="bibr" rid="ref65">Vandevelde et al. (2023)</xref>. After inspecting the CSD plots, we used one electrode per layer to derive layer-specific LFPs from the MEA recordings. The location and extent of layer 4 could be easily identified by a prominent short-latency current sink in the CSD, computed from the average LFP response to repeated high-intensity C1 whisker deflections, and therefore could be separated from layers 2/3 above and layer 5 below. The border between layers 5 and 6 was estimated based on recording depth (for more details, see <xref ref-type="bibr" rid="ref65">Vandevelde et al., 2023</xref>, as well as <xref ref-type="bibr" rid="ref44">Reyes-Puerta et al., 2015</xref>; <xref ref-type="bibr" rid="ref73">Yang et al., 2017</xref>, <xref ref-type="bibr" rid="ref74">2018</xref>). Psychometric curves (<xref ref-type="fig" rid="fig1">Figure 1C</xref>) differ slightly from those shown in our previous study (<xref ref-type="bibr" rid="ref65">Vandevelde et al., 2023</xref>) because we excluded trials with a lick response within the first 100&#x202F;ms after stimulus onset as the muscle movement related to licking caused artifacts in the recordings.</p>
</sec>
<sec id="sec15">
<title>LFP preprocessing and LFP features</title>
<p>To exclude LFP traces contaminated by artifacts, we applied trial filters for the full range from &#x2212;420&#x202F;ms to +120&#x202F;ms according to the following criteria. Trials in which animals were presumably moving and/or licking randomly had large voltage fluctuations and sometimes burst-like oscillations in activity. Trials in which the signal reached saturation were identified by applying a cutoff of &#x00B1;2&#x202F;mV at any time point within the time range specified above. Bursts were identified by moving time windows of 20&#x202F;ms in which all electrodes strongly correlated, computed by Pearson&#x2019;s correlation (<xref ref-type="bibr" rid="ref50">Schober et al., 2018</xref>). Electric shorts were partly captured by the MEA electrodes, resulting in a continuous 50&#x202F;Hz sinusoidal signal, and trials with a strong 50&#x202F;Hz frequency power were removed. Trials for which a lick was detected within the above-mentioned time window were also excluded.</p>
<p>The RAW features describe the LFP trace at 1,000&#x202F;Hz resolution, resulting in 400 features describing, for instance, the pre-stimulus activity of 400&#x202F;ms from &#x2212;400&#x202F;ms to 0&#x202F;ms (stimulus onset). Hence, the peri-stimulus activity from 0 to +100&#x202F;ms is described by a feature vector of length 100, while 500 features represent the full time window from &#x2212;400&#x202F;ms to +100&#x202F;ms. Using causal Butterworth filtering, the LFP traces were low-pass filtered at 150&#x202F;Hz, and the 47&#x2013;53&#x202F;Hz band was removed, using a first-order low-pass and a sixth-order band-stop filter (<xref ref-type="bibr" rid="ref52">Selesnick and Burrus, 2002</xref>). Using the same time-windows, the Fast-Fourier-Transform (FFT) was applied to derive the strength of the frequency components (<xref ref-type="bibr" rid="ref7">Brigham, 1988</xref>). According to the length of the signal derived from the investigated time windows, the resolution is 2.5&#x202F;Hz and 10&#x202F;Hz going up to 147.5&#x202F;Hz and 140&#x202F;Hz for the pre- and peri-stimulus activity, respectively. The first FFT feature (0&#x202F;Hz) represents the constant, which is simply the average over the signal. Eventually, 60 pre-stimulus and 15 peri-stimulus features describe the FFT feature vectors, resulting in 75 features for the full range. Due to the strong differences between ongoing and evoked activity, the FFT was applied to those time windows separately, and the resulting FFT features were merged afterward in order to create the full (pre- and peri-stimulus) FFT feature vector.</p>
</sec>
<sec id="sec16">
<title>Soft- and hardware specifications</title>
<p>We used MOGON II, the high-performance computing system from the University of Mainz. The whole analysis was implemented in Python. We ran DT, RF, and GLM from the <italic>scikit-learn</italic> package (<xref ref-type="bibr" rid="ref38">Pedregosa et al., 2011</xref>) and XGB using a separate library called <italic>xgboost</italic> (<xref ref-type="bibr" rid="ref12">Chen and Guestrin, 2016</xref>). The algorithm ROCKET (RCKT) was integrated via the <italic>sktime</italic> package (<xref ref-type="bibr" rid="ref29">L&#x00F6;ning et al., 2019</xref>), implementing its univariate and multivariate variants (<xref ref-type="bibr" rid="ref17">Dempster et al., 2020</xref>; <xref ref-type="bibr" rid="ref37">Pantiskas et al., 2022</xref>). The LSM algorithm was used from the GitHub repository <ext-link xlink:href="https://github.com/IGITUGraz/LSM" ext-link-type="uri">https://github.com/IGITUGraz/LSM</ext-link>, published by <xref ref-type="bibr" rid="ref30">Maass et al. (2002)</xref>. The CPU specifications on the SMP compute nodes on MOGON are Intel&#x00AE; Xeon&#x00AE; E5 v4, 2.2GHz.</p>
</sec>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="sec17">
<title>Data availability statement</title>
<p>The datasets used, and the Python code performing the ML benchmark are publicly available in a gitlab repository: <ext-link xlink:href="https://gitlab.rlp.net/salbrec/sdrpml.git" ext-link-type="uri">https://gitlab.rlp.net/salbrec/sdrpml.git</ext-link>.</p>
</sec>
<sec sec-type="ethics-statement" id="sec18">
<title>Ethics statement</title>
<p>The animal study was approved by a local ethics committee (#23 177-07/G14-1-080) following the European and German national regulations (European Communities Council Directive, 86/609/EEC). The study was conducted in accordance with the local legislation and institutional requirements.</p>
</sec>
<sec sec-type="author-contributions" id="sec19">
<title>Author contributions</title>
<p>SA: Data curation, Visualization, Writing &#x2013; original draft, Formal analysis, Validation, Methodology, Software, Investigation, Conceptualization, Writing &#x2013; review &#x0026; editing. JV: Validation, Conceptualization, Formal analysis, Data curation, Investigation, Writing &#x2013; review &#x0026; editing. EV: Formal analysis, Visualization, Software, Writing &#x2013; review &#x0026; editing, Validation. GB: Software, Formal analysis, Writing &#x2013; review &#x0026; editing, Visualization, Validation. DB: Formal analysis, Visualization, Software, Writing &#x2013; review &#x0026; editing, Validation, Investigation. MS: Resources, Funding acquisition, Writing &#x2013; review &#x0026; editing, Formal analysis, Data curation, Methodology, Conceptualization, Project administration, Supervision, Investigation. HL: Writing &#x2013; review &#x0026; editing, Conceptualization, Investigation, Funding acquisition, Supervision, Resources, Data curation, Project administration, Formal analysis, Methodology. IH: Resources, Formal analysis, Supervision, Writing &#x2013; review &#x0026; editing, Investigation, Methodology, Conceptualization, Funding acquisition, Software, Project administration.</p>
</sec>
<ack>
<title>Acknowledgments</title>
<p>Parts of this research were conducted using the supercomputer MOGON 2 and/or advisory services offered by Johannes Gutenberg-University Mainz (<ext-link xlink:href="https://hpc.uni-mainz.de/" ext-link-type="uri">hpc.uni-mainz.de</ext-link>), which is a member of the AHRP (Alliance for High Performance Computing in Rhineland-Palatinate, <ext-link xlink:href="http://www.ahrp.info" ext-link-type="uri">www.ahrp.info</ext-link>) and the Gauss Alliance e.V. The authors gratefully acknowledge the computing time granted on the supercomputer MOGON 2 at Johannes Gutenberg-University Mainz (<ext-link xlink:href="https://hpc.uni-mainz.de/" ext-link-type="uri">hpc.uni-mainz.de</ext-link>).</p>
</ack>
<sec sec-type="COI-statement" id="sec20">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec21">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec22">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="sec23">
<title>Supplementary material</title>
<p>The Supplementary material for this article can be found online at: <ext-link xlink:href="https://www.frontiersin.org/articles/10.3389/fnins.2026.1605209/full#supplementary-material" ext-link-type="uri">https://www.frontiersin.org/articles/10.3389/fnins.2026.1605209/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Supplementary_file_1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Andersen</surname><given-names>R. A.</given-names></name> <name><surname>Musallam</surname><given-names>S.</given-names></name> <name><surname>Pesaran</surname><given-names>B.</given-names></name></person-group> (<year>2004</year>). <article-title>Selecting the signals for a brain&#x2013;machine interface</article-title>. <source>Curr. Opin. Neurobiol.</source> <volume>14</volume>, <fpage>720</fpage>&#x2013;<lpage>726</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.conb.2004.10.005</pub-id>, <pub-id pub-id-type="pmid">15582374</pub-id></mixed-citation></ref>
<ref id="ref2"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Arabzadeh</surname><given-names>E.</given-names></name> <name><surname>Panzeri</surname><given-names>S.</given-names></name> <name><surname>Diamond</surname><given-names>M. E.</given-names></name></person-group> (<year>2006</year>). <article-title>Deciphering the spike train of a sensory neuron: counts and temporal patterns in the rat whisker pathway</article-title>. <source>J. Neurosci.</source> <volume>26</volume>, <fpage>9216</fpage>&#x2013;<lpage>9226</lpage>. doi: <pub-id pub-id-type="doi">10.1523/jneurosci.1491-06.2006</pub-id></mixed-citation></ref>
<ref id="ref3"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Behrenbeck</surname><given-names>J.</given-names></name> <name><surname>Tayeb</surname><given-names>Z.</given-names></name> <name><surname>Bhiri</surname><given-names>C.</given-names></name> <name><surname>Richter</surname><given-names>C.</given-names></name> <name><surname>Rhodes</surname><given-names>O.</given-names></name> <name><surname>Kasabov</surname><given-names>N.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>Classification and regression of spatio-temporal signals using NeuCube and its realization on SpiNNaker neuromorphic hardware</article-title>. <source>J. Neural Eng.</source> <volume>16</volume>:<fpage>026014</fpage>. doi: <pub-id pub-id-type="doi">10.1088/1741-2552/aafabc</pub-id>, <pub-id pub-id-type="pmid">30577030</pub-id></mixed-citation></ref>
<ref id="ref4"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bhadra</surname><given-names>K.</given-names></name> <name><surname>Giraud</surname><given-names>A.-L.</given-names></name> <name><surname>Marchesotti</surname><given-names>S.</given-names></name></person-group> (<year>2025</year>). <article-title>Learning to operate an imagined speech brain-computer interface involves the spatial and frequency tuning of neural activity</article-title>. <source>Commun. Biol.</source> <volume>8</volume>:<fpage>271</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s42003-025-07464-7</pub-id>, <pub-id pub-id-type="pmid">39979463</pub-id></mixed-citation></ref>
<ref id="ref5"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bonaiuto</surname><given-names>J. J.</given-names></name> <name><surname>Rossiter</surname><given-names>H. E.</given-names></name> <name><surname>Meyer</surname><given-names>S. S.</given-names></name> <name><surname>Adams</surname><given-names>N.</given-names></name> <name><surname>Little</surname><given-names>S.</given-names></name> <name><surname>Callaghan</surname><given-names>M. F.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Non-invasive laminar inference with MEG: comparison of methods and source inversion algorithms</article-title>. <source>NeuroImage</source> <volume>167</volume>, <fpage>372</fpage>&#x2013;<lpage>383</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroimage.2017.11.068</pub-id>, <pub-id pub-id-type="pmid">29203456</pub-id></mixed-citation></ref>
<ref id="ref6"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Breiman</surname><given-names>L.</given-names></name></person-group> (<year>2001</year>). <article-title>Random forests</article-title>. <source>Mach. Learn.</source> <volume>45</volume>, <fpage>5</fpage>&#x2013;<lpage>32</lpage>. doi: <pub-id pub-id-type="doi">10.1023/A:1010933404324</pub-id></mixed-citation></ref>
<ref id="ref7"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Brigham</surname><given-names>E. O.</given-names></name></person-group> (<year>1988</year>). <source>The Fast Fourier Transform and its Applications</source>. <publisher-loc>Englewood Cliffs, New Jersey, USA</publisher-loc>: <publisher-name>Prentice-Hall, Inc</publisher-name>.</mixed-citation></ref>
<ref id="ref8"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Buccelli</surname><given-names>S.</given-names></name> <name><surname>Bornat</surname><given-names>Y.</given-names></name> <name><surname>Colombi</surname><given-names>I.</given-names></name> <name><surname>Ambroise</surname><given-names>M.</given-names></name> <name><surname>Martines</surname><given-names>L.</given-names></name> <name><surname>Pasquale</surname><given-names>V.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>A neuromorphic prosthesis to restore communication in neuronal networks</article-title>. <source>IScience</source> <volume>19</volume>, <fpage>402</fpage>&#x2013;<lpage>414</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.isci.2019.07.046</pub-id>, <pub-id pub-id-type="pmid">31421595</pub-id></mixed-citation></ref>
<ref id="ref9"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Buzs&#x00E1;ki</surname><given-names>G.</given-names></name> <name><surname>Anastassiou</surname><given-names>C. A.</given-names></name> <name><surname>Koch</surname><given-names>C.</given-names></name></person-group> (<year>2012</year>). <article-title>The origin of extracellular fields and currents&#x2014;EEG, ECoG, LFP and spikes</article-title>. <source>Nat. Rev. Neurosci.</source> <volume>13</volume>, <fpage>407</fpage>&#x2013;<lpage>420</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nrn3241</pub-id>, <pub-id pub-id-type="pmid">22595786</pub-id></mixed-citation></ref>
<ref id="ref10"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Carcea</surname><given-names>I.</given-names></name> <name><surname>Insanally</surname><given-names>M. N.</given-names></name> <name><surname>Froemke</surname><given-names>R. C.</given-names></name></person-group> (<year>2017</year>). <article-title>Dynamics of auditory cortical activity during behavioural engagement and auditory perception</article-title>. <source>Nat. Commun.</source> <volume>8</volume>:<fpage>14412</fpage>. doi: <pub-id pub-id-type="doi">10.1038/ncomms14412</pub-id>, <pub-id pub-id-type="pmid">28176787</pub-id></mixed-citation></ref>
<ref id="ref11"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Carlini</surname><given-names>N.</given-names></name> <name><surname>Wagner</surname><given-names>D.</given-names></name></person-group> (<year>2017</year>). &#x201C;<chapter-title>Towards evaluating the robustness of neural networks</chapter-title>,&#x201D; in <source>2017 IEEE Symposium on Security and Privacy (SP)</source>, (<publisher-loc>New York City, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>39</fpage>&#x2013;<lpage>57</lpage>.</mixed-citation></ref>
<ref id="ref12"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Chen</surname><given-names>T.</given-names></name> <name><surname>Guestrin</surname><given-names>C.</given-names></name></person-group> (<year>2016</year>). &#x201C;<chapter-title>XGBoost: a scalable tree boosting system</chapter-title>,&#x201D; in <source>Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining</source>, (<publisher-loc>New York City, USA</publisher-loc>: <publisher-name>ACM</publisher-name>), <fpage>785</fpage>&#x2013;<lpage>794</lpage>.</mixed-citation></ref>
<ref id="ref13"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen-Bee</surname><given-names>C. H.</given-names></name> <name><surname>Zhou</surname><given-names>Y.</given-names></name> <name><surname>Jacobs</surname><given-names>N. S.</given-names></name> <name><surname>Lim</surname><given-names>B.</given-names></name> <name><surname>Frostig</surname><given-names>R. D.</given-names></name></person-group> (<year>2012</year>). <article-title>Whisker array functional representation in rat barrel cortex: transcendence of one-to-one topography and its underlying mechanism</article-title>. <source>Front. Neural Circuits</source> <volume>6</volume>:<fpage>93</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fncir.2012.00093</pub-id>, <pub-id pub-id-type="pmid">23205005</pub-id></mixed-citation></ref>
<ref id="ref14"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Chikkudu</surname><given-names>S.</given-names></name> <name><surname>Annamalai</surname><given-names>S.</given-names></name></person-group> (<year>2024</year>). &#x201C;<chapter-title>Motor-imagery signal preprocessing using the SMOTE technique and neuromuscular disorder detection</chapter-title>,&#x201D; in <source>2024 International Conference on Emerging Techniques in Computational Intelligence (ICETCI)</source>, (<publisher-loc>New York City, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>324</fpage>&#x2013;<lpage>329</lpage>.</mixed-citation></ref>
<ref id="ref15"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cowley</surname><given-names>B. R.</given-names></name> <name><surname>Snyder</surname><given-names>A. C.</given-names></name> <name><surname>Acar</surname><given-names>K.</given-names></name> <name><surname>Williamson</surname><given-names>R. C.</given-names></name> <name><surname>Yu</surname><given-names>B. M.</given-names></name> <name><surname>Smith</surname><given-names>M. A.</given-names></name></person-group> (<year>2020</year>). <article-title>Slow drift of neural activity as a signature of impulsivity in macaque visual and prefrontal cortex</article-title>. <source>Neuron</source> <volume>108</volume>, <fpage>551</fpage>&#x2013;<lpage>567.e8</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuron.2020.07.021</pub-id>, <pub-id pub-id-type="pmid">32810433</pub-id></mixed-citation></ref>
<ref id="ref16"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>De Franceschi</surname><given-names>G.</given-names></name> <name><surname>Barkat</surname><given-names>T. R.</given-names></name></person-group> (<year>2021</year>). <article-title>Task-induced modulations of neuronal activity along the auditory pathway</article-title>. <source>Cell Rep.</source> <volume>37</volume>, <fpage>1</fpage>&#x2013;<lpage>23</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.celrep.2021.110115</pub-id>, <pub-id pub-id-type="pmid">34910908</pub-id></mixed-citation></ref>
<ref id="ref17"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dempster</surname><given-names>A.</given-names></name> <name><surname>Petitjean</surname><given-names>F.</given-names></name> <name><surname>Webb</surname><given-names>G. I.</given-names></name></person-group> (<year>2020</year>). <article-title>Rocket: exceptionally fast and accurate time series classification using random convolutional kernels</article-title>. <source>Data Min. Knowl. Discov.</source> <volume>34</volume>, <fpage>1454</fpage>&#x2013;<lpage>1495</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10618-020-00701-z</pub-id></mixed-citation></ref>
<ref id="ref18"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Feldmeyer</surname><given-names>D.</given-names></name> <name><surname>Brecht</surname><given-names>M.</given-names></name> <name><surname>Helmchen</surname><given-names>F.</given-names></name> <name><surname>Petersen</surname><given-names>C. C.</given-names></name> <name><surname>Poulet</surname><given-names>J. F.</given-names></name> <name><surname>Staiger</surname><given-names>J. F.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Barrel cortex function</article-title>. <source>Prog. Neurobiol.</source> <volume>103</volume>, <fpage>3</fpage>&#x2013;<lpage>27</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.pneurobio.2012.11.002</pub-id>, <pub-id pub-id-type="pmid">23195880</pub-id></mixed-citation></ref>
<ref id="ref19"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>G&#x0103;m&#x0103;nu&#x0163;</surname><given-names>R.</given-names></name> <name><surname>Kennedy</surname><given-names>H.</given-names></name> <name><surname>Toroczkai</surname><given-names>Z.</given-names></name> <name><surname>Ercsey-Ravasz</surname><given-names>M.</given-names></name> <name><surname>van Essen</surname><given-names>D. C.</given-names></name> <name><surname>Knoblauch</surname><given-names>K.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>The mouse cortical connectome, characterized by an ultra-dense cortical graph, maintains specificity by distinct connectivity profiles</article-title>. <source>Neuron</source> <volume>97</volume>, <fpage>698</fpage>&#x2013;<lpage>715.e10</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuron.2017.12.037</pub-id>, <pub-id pub-id-type="pmid">29420935</pub-id></mixed-citation></ref>
<ref id="ref20"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Giri</surname><given-names>H. R.</given-names></name> <name><surname>Negi</surname><given-names>P. C. B. S.</given-names></name> <name><surname>Sharma</surname><given-names>S.</given-names></name> <name><surname>Sharma</surname><given-names>N.</given-names></name></person-group> (<year>2024</year>). &#x201C;<chapter-title>An intuitive real-time brain control interface based on motor imagery and execution</chapter-title>,&#x201D; in <source>2024 IEEE 4th International Conference on Human-Machine Systems (ICHMS)</source>, (<publisher-loc>New York City, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>6</lpage>.</mixed-citation></ref>
<ref id="ref21"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Guggenmos</surname><given-names>D. J.</given-names></name> <name><surname>Azin</surname><given-names>M.</given-names></name> <name><surname>Barbay</surname><given-names>S.</given-names></name> <name><surname>Mahnken</surname><given-names>J. D.</given-names></name> <name><surname>Dunham</surname><given-names>C.</given-names></name> <name><surname>Mohseni</surname><given-names>P.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Restoration of function after brain damage using a neural prosthesis</article-title>. <source>Proc. Natl. Acad. Sci.</source> <volume>110</volume>, <fpage>21177</fpage>&#x2013;<lpage>21182</lpage>. doi: <pub-id pub-id-type="doi">10.1073/pnas.1316885110</pub-id>, <pub-id pub-id-type="pmid">24324155</pub-id></mixed-citation></ref>
<ref id="ref22"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Horenko</surname><given-names>I.</given-names></name></person-group> (<year>2020</year>). <article-title>On a scalable entropic breaching of the overfitting barrier for small data problems in machine learning</article-title>. <source>Neural Comput.</source> <volume>32</volume>, <fpage>1563</fpage>&#x2013;<lpage>1579</lpage>. doi: <pub-id pub-id-type="doi">10.1162/neco_a_01296</pub-id>, <pub-id pub-id-type="pmid">32521216</pub-id></mixed-citation></ref>
<ref id="ref23"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jackson</surname><given-names>A.</given-names></name> <name><surname>Mavoori</surname><given-names>J.</given-names></name> <name><surname>Fetz</surname><given-names>E. E.</given-names></name></person-group> (<year>2006</year>). <article-title>Long-term motor cortex plasticity induced by an electronic neural implant</article-title>. <source>Nature</source> <volume>444</volume>, <fpage>56</fpage>&#x2013;<lpage>60</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nature05226</pub-id></mixed-citation></ref>
<ref id="ref24"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Khanday</surname><given-names>O. M.</given-names></name> <name><surname>Ouellet</surname><given-names>M.</given-names></name> <name><surname>P&#x00E9;rez C&#x00F3;rdoba</surname><given-names>J. L.</given-names></name></person-group> (<year>2024</year>). <source>Decoding the Mind: Neural Differences and Semantic Representation in Perception and Imagination across Modalities</source>.</mixed-citation></ref>
<ref id="ref25"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Kleinbaum</surname><given-names>D. G.</given-names></name> <name><surname>Klein</surname><given-names>M.</given-names></name></person-group> (<year>2010</year>). <source>Logistic Regression</source>. <publisher-loc>New York, NY</publisher-loc>: <publisher-name>Springer</publisher-name>.</mixed-citation></ref>
<ref id="ref26"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Krucoff</surname><given-names>M. O.</given-names></name> <name><surname>Rahimpour</surname><given-names>S.</given-names></name> <name><surname>Slutzky</surname><given-names>M. W.</given-names></name> <name><surname>Edgerton</surname><given-names>V. R.</given-names></name> <name><surname>Turner</surname><given-names>D. A.</given-names></name></person-group> (<year>2016</year>). <article-title>Enhancing nervous system recovery through neurobiologics, neural interface training, and neurorehabilitation</article-title>. <source>Front. Neurosci.</source> <volume>10</volume>:<fpage>584</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2016.00584</pub-id>, <pub-id pub-id-type="pmid">28082858</pub-id></mixed-citation></ref>
<ref id="ref27"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname><given-names>S.-H.</given-names></name> <name><surname>Dan</surname><given-names>Y.</given-names></name></person-group> (<year>2012</year>). <article-title>Neuromodulation of brain states</article-title>. <source>Neuron</source> <volume>76</volume>, <fpage>209</fpage>&#x2013;<lpage>222</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuron.2012.09.012</pub-id>, <pub-id pub-id-type="pmid">23040816</pub-id></mixed-citation></ref>
<ref id="ref28"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Loh</surname><given-names>W.</given-names></name></person-group> (<year>2011</year>). <article-title>Classification and regression trees</article-title>. <source>WIREs Data Min. Knowl. Discov.</source> <volume>1</volume>, <fpage>14</fpage>&#x2013;<lpage>23</lpage>. doi: <pub-id pub-id-type="doi">10.1002/widm.8</pub-id></mixed-citation></ref>
<ref id="ref29"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>L&#x00F6;ning</surname><given-names>M.</given-names></name> <name><surname>Bagnall</surname><given-names>A.</given-names></name> <name><surname>Ganesh</surname><given-names>S.</given-names></name> <name><surname>Kazakov</surname><given-names>V.</given-names></name> <name><surname>Lines</surname><given-names>J.</given-names></name> <name><surname>Kir&#x00E1;ly</surname><given-names>F. J.</given-names></name> <etal/></person-group> (<year>2019</year>). <article-title>Sktime: a unified interface for machine learning with time series</article-title>. arXiv preprint arXiv:1909.07872. doi: <pub-id pub-id-type="doi">10.48550/arXiv.1909.07872</pub-id></mixed-citation></ref>
<ref id="ref30"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Maass</surname><given-names>W.</given-names></name> <name><surname>Natschl&#x00E4;ger</surname><given-names>T.</given-names></name> <name><surname>Markram</surname><given-names>H.</given-names></name></person-group> (<year>2002</year>). <article-title>Real-time computing without stable states: a new framework for neural computation based on perturbations</article-title>. <source>Neural Comput.</source> <volume>14</volume>, <fpage>2531</fpage>&#x2013;<lpage>2560</lpage>. doi: <pub-id pub-id-type="doi">10.1162/089976602760407955</pub-id>, <pub-id pub-id-type="pmid">12433288</pub-id></mixed-citation></ref>
<ref id="ref31"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Markowitz</surname><given-names>D. A.</given-names></name> <name><surname>Wong</surname><given-names>Y. T.</given-names></name> <name><surname>Gray</surname><given-names>C. M.</given-names></name> <name><surname>Pesaran</surname><given-names>B.</given-names></name></person-group> (<year>2011</year>). <article-title>Optimizing the decoding of movement goals from local field potentials in macaque cortex</article-title>. <source>J. Neurosci.</source> <volume>31</volume>, <fpage>18412</fpage>&#x2013;<lpage>18422</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.4165-11.2011</pub-id>, <pub-id pub-id-type="pmid">22171043</pub-id></mixed-citation></ref>
<ref id="ref32"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Martinez</surname><given-names>D.</given-names></name> <name><surname>Cl&#x00E9;ment</surname><given-names>M.</given-names></name> <name><surname>Messaoudi</surname><given-names>B.</given-names></name> <name><surname>Gervasoni</surname><given-names>D.</given-names></name> <name><surname>Litaudon</surname><given-names>P.</given-names></name> <name><surname>Buonviso</surname><given-names>N.</given-names></name></person-group> (<year>2018</year>). <article-title>Adaptive quantization of local field potentials for wireless implants in freely moving animals: an open-source neural recording device</article-title>. <source>J. Neural Eng.</source> <volume>15</volume>:<fpage>025001</fpage>. doi: <pub-id pub-id-type="doi">10.1088/1741-2552/aaa041</pub-id>, <pub-id pub-id-type="pmid">29219118</pub-id></mixed-citation></ref>
<ref id="ref33"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Menon</surname><given-names>A.</given-names></name> <name><surname>Olascoaga</surname><given-names>L. I. G.</given-names></name> <name><surname>Balanaga</surname><given-names>V.</given-names></name> <name><surname>Natarajan</surname><given-names>A.</given-names></name> <name><surname>Ruffing</surname><given-names>J.</given-names></name> <name><surname>Ardalan</surname><given-names>R.</given-names></name> <etal/></person-group>. (<year>2023</year>). &#x201C;<chapter-title>Shared control of assistive robots through user-intent prediction and hyperdimensional recall of reactive behavior</chapter-title>,&#x201D; in <source>2023 IEEE International Conference on Robotics and Automation (ICRA)</source>, (<publisher-loc>New York City, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>12638</fpage>&#x2013;<lpage>12644</lpage>.</mixed-citation></ref>
<ref id="ref34"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Niculescu-Mizil</surname><given-names>A.</given-names></name> <name><surname>Caruana</surname><given-names>R.</given-names></name></person-group> (<year>2005</year>). &#x201C;<chapter-title>Predicting good probabilities with supervised learning</chapter-title>,&#x201D; in <source>Proceedings of the 22nd International Conference on Machine Learning - ICML &#x2018;05</source>, (<publisher-loc>New York City, USA</publisher-loc>: <publisher-name>ACM Press</publisher-name>), <fpage>625</fpage>&#x2013;<lpage>632</lpage>.</mixed-citation></ref>
<ref id="ref35"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>O&#x2019;Doherty</surname><given-names>J. E.</given-names></name> <name><surname>Lebedev</surname><given-names>M. A.</given-names></name> <name><surname>Ifft</surname><given-names>P. J.</given-names></name> <name><surname>Zhuang</surname><given-names>K. Z.</given-names></name> <name><surname>Shokur</surname><given-names>S.</given-names></name> <name><surname>Bleuler</surname><given-names>H.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>Active tactile exploration using a brain&#x2013;machine&#x2013;brain interface</article-title>. <source>Nature</source> <volume>479</volume>, <fpage>228</fpage>&#x2013;<lpage>231</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nature10489</pub-id>, <pub-id pub-id-type="pmid">21976021</pub-id></mixed-citation></ref>
<ref id="ref36"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Otazu</surname><given-names>G. H.</given-names></name> <name><surname>Tai</surname><given-names>L.-H.</given-names></name> <name><surname>Yang</surname><given-names>Y.</given-names></name> <name><surname>Zador</surname><given-names>A. M.</given-names></name></person-group> (<year>2009</year>). <article-title>Engaging in an auditory task suppresses responses in auditory cortex</article-title>. <source>Nat. Neurosci.</source> <volume>12</volume>, <fpage>646</fpage>&#x2013;<lpage>654</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nn.2306</pub-id>, <pub-id pub-id-type="pmid">19363491</pub-id></mixed-citation></ref>
<ref id="ref37"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Pantiskas</surname><given-names>L.</given-names></name> <name><surname>Verstoep</surname><given-names>K.</given-names></name> <name><surname>Hoogendoorn</surname><given-names>M.</given-names></name> <name><surname>Bal</surname><given-names>H.</given-names></name></person-group> (<year>2022</year>). &#x201C;<chapter-title>Taking ROCKET on an efficiency mission: multivariate time series classification with LightWaveS</chapter-title>,&#x201D; in <source>2022 18th International Conference on Distributed Computing in Sensor Systems (DCOSS)</source>, (<publisher-loc>New York City, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>149</fpage>&#x2013;<lpage>152</lpage>.</mixed-citation></ref>
<ref id="ref38"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pedregosa</surname><given-names>F.</given-names></name> <name><surname>Varoquaux</surname><given-names>G.</given-names></name> <name><surname>Gramfort</surname><given-names>A.</given-names></name> <name><surname>Michel</surname><given-names>V.</given-names></name> <name><surname>Thirion</surname><given-names>B.</given-names></name> <name><surname>Grisel</surname><given-names>O.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>Scikit-learn: machine learning in Python</article-title>. <source>J. Mach. Learn. Res.</source> <volume>12</volume>, <fpage>2825</fpage>&#x2013;<lpage>2830</lpage>. doi: <pub-id pub-id-type="doi">10.5555/1953048.2078195</pub-id></mixed-citation></ref>
<ref id="ref39"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Petschenig</surname><given-names>H.</given-names></name> <name><surname>Bisio</surname><given-names>M.</given-names></name> <name><surname>Maschietto</surname><given-names>M.</given-names></name> <name><surname>Leparulo</surname><given-names>A.</given-names></name> <name><surname>Legenstein</surname><given-names>R.</given-names></name> <name><surname>Vassanelli</surname><given-names>S.</given-names></name></person-group> (<year>2022</year>). <article-title>Classification of whisker deflections from evoked responses in the somatosensory barrel cortex with spiking neural networks</article-title>. <source>Front. Neurosci.</source> <volume>16</volume>:<fpage>838054</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2022.838054</pub-id>, <pub-id pub-id-type="pmid">35495034</pub-id></mixed-citation></ref>
<ref id="ref40"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pinto</surname><given-names>D. J.</given-names></name> <name><surname>Brumberg</surname><given-names>J. C.</given-names></name> <name><surname>Simons</surname><given-names>D. J.</given-names></name></person-group> (<year>2000</year>). <article-title>Circuit dynamics and coding strategies in rodent somatosensory cortex</article-title>. <source>J. Neurophysiol.</source> <volume>83</volume>, <fpage>1158</fpage>&#x2013;<lpage>1166</lpage>. doi: <pub-id pub-id-type="doi">10.1152/jn.2000.83.3.1158</pub-id>, <pub-id pub-id-type="pmid">10712446</pub-id></mixed-citation></ref>
<ref id="ref41"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Platt</surname><given-names>J.</given-names></name></person-group> (<year>1999</year>). <article-title>Probabilistic outputs for support vector machines and comparisons to regularized likelihood methods</article-title>. <source>Adv. Large Margin Classifiers</source> <volume>10</volume>, <fpage>61</fpage>&#x2013;<lpage>74</lpage>.</mixed-citation></ref>
<ref id="ref42"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rapeaux</surname><given-names>A. B.</given-names></name> <name><surname>Constandinou</surname><given-names>T. G.</given-names></name></person-group> (<year>2021</year>). <article-title>Implantable brain machine interfaces: first-in-human studies, technology challenges and trends</article-title>. <source>Curr. Opin. Biotechnol.</source> <volume>72</volume>, <fpage>102</fpage>&#x2013;<lpage>111</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.copbio.2021.10.001</pub-id>, <pub-id pub-id-type="pmid">34749248</pub-id></mixed-citation></ref>
<ref id="ref43"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Reyes-Puerta</surname><given-names>V.</given-names></name> <name><surname>Kim</surname><given-names>S.</given-names></name> <name><surname>Sun</surname><given-names>J.-J.</given-names></name> <name><surname>Imbrosci</surname><given-names>B.</given-names></name> <name><surname>Kilb</surname><given-names>W.</given-names></name> <name><surname>Luhmann</surname><given-names>H. J.</given-names></name></person-group> (<year>2015</year>). <article-title>High stimulus-related information in barrel cortex inhibitory interneurons</article-title>. <source>PLoS Comput. Biol.</source> <volume>11</volume>:<fpage>e1004121</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pcbi.1004121</pub-id>, <pub-id pub-id-type="pmid">26098109</pub-id></mixed-citation></ref>
<ref id="ref44"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Reyes-Puerta</surname><given-names>V.</given-names></name> <name><surname>Sun</surname><given-names>J.-J.</given-names></name> <name><surname>Kim</surname><given-names>S.</given-names></name> <name><surname>Kilb</surname><given-names>W.</given-names></name> <name><surname>Luhmann</surname><given-names>H. J.</given-names></name></person-group>. (<year>2015</year>). <article-title>Laminar and columnar structure of sensory-evoked multineuronal spike sequences in adult rat barrel cortex in vivo</article-title>. <source>Cereb. Cortex</source> <volume>25</volume>, <fpage>2001</fpage>&#x2013;<lpage>2021</lpage>. doi: <pub-id pub-id-type="doi">10.1093/cercor/bhu007</pub-id></mixed-citation></ref>
<ref id="ref45"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rezeika</surname><given-names>A.</given-names></name> <name><surname>Benda</surname><given-names>M.</given-names></name> <name><surname>Stawicki</surname><given-names>P.</given-names></name> <name><surname>Gembler</surname><given-names>F.</given-names></name> <name><surname>Saboor</surname><given-names>A.</given-names></name> <name><surname>Volosyak</surname><given-names>I.</given-names></name></person-group> (<year>2018</year>). <article-title>Brain&#x2013;computer interface spellers: a review</article-title>. <source>Brain Sci.</source> <volume>8</volume>:<fpage>57</fpage>. doi: <pub-id pub-id-type="doi">10.3390/brainsci8040057</pub-id>, <pub-id pub-id-type="pmid">29601538</pub-id></mixed-citation></ref>
<ref id="ref46"><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Rice</surname><given-names>L.</given-names></name> <name><surname>Wong</surname><given-names>E.</given-names></name> <name><surname>Kolter</surname><given-names>Z.</given-names></name></person-group> (<year>2020</year>). &#x201C;<chapter-title>Overfitting in adversarially robust deep learning</chapter-title>,&#x201D; in <source>International Conference on Machine Learning, PMLR</source>, (<publisher-loc>Granada, Spain</publisher-loc>), <fpage>8093</fpage>&#x2013;<lpage>8104</lpage>.</mixed-citation></ref>
<ref id="ref47"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ritt</surname><given-names>J. T.</given-names></name> <name><surname>Andermann</surname><given-names>M. L.</given-names></name> <name><surname>Moore</surname><given-names>C. I.</given-names></name></person-group> (<year>2008</year>). <article-title>Embodied information processing: vibrissa mechanics and texture features shape micromotions in actively sensing rats</article-title>. <source>Neuron</source> <volume>57</volume>, <fpage>599</fpage>&#x2013;<lpage>613</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuron.2007.12.024</pub-id>, <pub-id pub-id-type="pmid">18304488</pub-id></mixed-citation></ref>
<ref id="ref48"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rossant</surname><given-names>C.</given-names></name> <name><surname>Kadir</surname><given-names>S. N.</given-names></name> <name><surname>Goodman</surname><given-names>D. F.</given-names></name> <name><surname>Goodman</surname><given-names>D. F. M.</given-names></name> <name><surname>Schulman</surname><given-names>J.</given-names></name> <name><surname>Hunter</surname><given-names>M. L. D.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Spike sorting for large, dense electrode arrays</article-title>. <source>Nat. Neurosci.</source> <volume>19</volume>, <fpage>634</fpage>&#x2013;<lpage>641</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nn.4268</pub-id>, <pub-id pub-id-type="pmid">26974951</pub-id></mixed-citation></ref>
<ref id="ref49"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Saito</surname><given-names>T.</given-names></name> <name><surname>Rehmsmeier</surname><given-names>M.</given-names></name></person-group> (<year>2015</year>). <article-title>The precision-recall plot is more informative than the ROC plot when evaluating binary classifiers on imbalanced datasets</article-title>. <source>PLoS One</source> <volume>10</volume>:<fpage>e0118432</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0118432</pub-id>, <pub-id pub-id-type="pmid">25738806</pub-id></mixed-citation></ref>
<ref id="ref50"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schober</surname><given-names>P.</given-names></name> <name><surname>Boer</surname><given-names>C.</given-names></name> <name><surname>Schwarte</surname><given-names>L. A.</given-names></name></person-group> (<year>2018</year>). <article-title>Correlation coefficients: appropriate use and interpretation</article-title>. <source>Anesth. Analg.</source> <volume>126</volume>, <fpage>1763</fpage>&#x2013;<lpage>1768</lpage>. doi: <pub-id pub-id-type="doi">10.1213/ANE.0000000000002864</pub-id>, <pub-id pub-id-type="pmid">29481436</pub-id></mixed-citation></ref>
<ref id="ref51"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sederberg</surname><given-names>A. J.</given-names></name> <name><surname>Pala</surname><given-names>A.</given-names></name> <name><surname>Zheng</surname><given-names>H. J.</given-names></name> <name><surname>He</surname><given-names>B. J.</given-names></name> <name><surname>Stanley</surname><given-names>G. B.</given-names></name></person-group>. (<year>2019</year>). <article-title>State-aware detection of sensory stimuli in the cortex of the awake mouse</article-title>. <source>PLoS Comput. Biol.</source> <volume>15</volume>:<fpage>e1006716</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pcbi.1006716</pub-id></mixed-citation></ref>
<ref id="ref52"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Selesnick</surname><given-names>I. W.</given-names></name> <name><surname>Burrus</surname><given-names>C. S.</given-names></name></person-group> (<year>1998</year>). <article-title>Generalized digital Butterworth filter design</article-title>. <source>IEEE Trans. Signal Process.</source> <volume>46</volume>, <fpage>1688</fpage>&#x2013;<lpage>1694</lpage>. doi: <pub-id pub-id-type="doi">10.1109/78.678493</pub-id></mixed-citation></ref>
<ref id="ref53"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Semprini</surname><given-names>M.</given-names></name> <name><surname>Laffranchi</surname><given-names>M.</given-names></name> <name><surname>Sanguineti</surname><given-names>V.</given-names></name> <name><surname>Avanzino</surname><given-names>L.</given-names></name> <name><surname>de Icco</surname><given-names>R.</given-names></name> <name><surname>de Michieli</surname><given-names>L.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Technological approaches for neurorehabilitation: from robotic devices to brain stimulation and beyond</article-title>. <source>Front. Neurol.</source> <volume>9</volume>:<fpage>212</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fneur.2018.00212</pub-id>, <pub-id pub-id-type="pmid">29686644</pub-id></mixed-citation></ref>
<ref id="ref54"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sitaram</surname><given-names>R.</given-names></name> <name><surname>Ros</surname><given-names>T.</given-names></name> <name><surname>Stoeckel</surname><given-names>L.</given-names></name> <name><surname>Haller</surname><given-names>S.</given-names></name> <name><surname>Scharnowski</surname><given-names>F.</given-names></name> <name><surname>Lewis-Peacock</surname><given-names>J.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Closed-loop brain training: the science of neurofeedback</article-title>. <source>Nat. Rev. Neurosci.</source> <volume>18</volume>, <fpage>86</fpage>&#x2013;<lpage>100</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nrn.2016.164</pub-id>, <pub-id pub-id-type="pmid">28003656</pub-id></mixed-citation></ref>
<ref id="ref55"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Srimaharaj</surname><given-names>W.</given-names></name> <name><surname>Chaisricharoen</surname><given-names>R.</given-names></name></person-group> (<year>2021</year>). <article-title>A novel processing model for P300 brainwaves detection</article-title>. <source>J. Web Eng.</source> <volume>20</volume>, <fpage>2545</fpage>&#x2013;<lpage>2570</lpage>. doi: <pub-id pub-id-type="doi">10.13052/jwe1540-9589.20815</pub-id></mixed-citation></ref>
<ref id="ref56"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Staiger</surname><given-names>J. F.</given-names></name> <name><surname>Petersen</surname><given-names>C. C.</given-names></name></person-group> (<year>2021</year>). <article-title>Neuronal circuits in barrel cortex for whisker sensory perception</article-title>. <source>Physiol. Rev.</source> <volume>101</volume>, <fpage>353</fpage>&#x2013;<lpage>415</lpage>. doi: <pub-id pub-id-type="doi">10.1152/physrev.00019.2019</pub-id></mixed-citation></ref>
<ref id="ref57"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Steriade</surname><given-names>M.</given-names></name> <name><surname>Nunez</surname><given-names>A.</given-names></name> <name><surname>Amzica</surname><given-names>F.</given-names></name></person-group> (<year>1993</year>). <article-title>A novel slow (&#x003C; 1 Hz) oscillation of neocortical neurons in vivo: depolarizing and hyperpolarizing components</article-title>. <source>J. Neurosci.</source> <volume>13</volume>, <fpage>3252</fpage>&#x2013;<lpage>3265</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.13-08-03252.1993</pub-id>, <pub-id pub-id-type="pmid">8340806</pub-id></mixed-citation></ref>
<ref id="ref58"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>St&#x00FC;ttgen</surname><given-names>M. C.</given-names></name> <name><surname>R&#x00FC;ter</surname><given-names>J.</given-names></name> <name><surname>Schwarz</surname><given-names>C.</given-names></name></person-group> (<year>2006</year>). <article-title>Two psychophysical channels of whisker deflection in rats align with two neuronal classes of primary afferents</article-title>. <source>J. Neurosci.</source> <volume>26</volume>, <fpage>7933</fpage>&#x2013;<lpage>7941</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.1864-06.2006</pub-id>, <pub-id pub-id-type="pmid">16870738</pub-id></mixed-citation></ref>
<ref id="ref59"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>St&#x00FC;ttgen</surname><given-names>M. C.</given-names></name> <name><surname>Schwarz</surname><given-names>C.</given-names></name></person-group> (<year>2008</year>). <article-title>Psychophysical and neurometric detection performance under stimulus uncertainty</article-title>. <source>Nat. Neurosci.</source> <volume>11</volume>, <fpage>1091</fpage>&#x2013;<lpage>1099</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nn.2162</pub-id>, <pub-id pub-id-type="pmid">19160508</pub-id></mixed-citation></ref>
<ref id="ref60"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>St&#x00FC;ttgen</surname><given-names>M. C.</given-names></name> <name><surname>Schwarz</surname><given-names>C.</given-names></name></person-group> (<year>2010</year>). <article-title>Integration of vibrotactile signals for whisker-related perception in rats is governed by short time constants: comparison of neurometric and psychometric detection performance</article-title>. <source>J. Neurosci.</source> <volume>30</volume>, <fpage>2060</fpage>&#x2013;<lpage>2069</lpage>. doi: <pub-id pub-id-type="doi">10.1523/JNEUROSCI.3943-09.2010</pub-id>, <pub-id pub-id-type="pmid">20147534</pub-id></mixed-citation></ref>
<ref id="ref61"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>St&#x00FC;ttgen</surname><given-names>M. C.</given-names></name> <name><surname>Schwarz</surname><given-names>C.</given-names></name></person-group> (<year>2018</year>). <article-title>Barrel cortex: what is it good for?</article-title> <source>Neuroscience</source> <volume>368</volume>, <fpage>3</fpage>&#x2013;<lpage>16</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuroscience.2017.05.009</pub-id>, <pub-id pub-id-type="pmid">28526578</pub-id></mixed-citation></ref>
<ref id="ref62"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sunny</surname><given-names>T.</given-names></name> <name><surname>Aparna</surname><given-names>T.</given-names></name> <name><surname>Neethu</surname><given-names>P.</given-names></name> <name><surname>Sunny</surname><given-names>T. D.</given-names></name> <name><surname>Venkateswaran</surname><given-names>J.</given-names></name> <name><surname>Vishnupriya</surname><given-names>V.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Robotic arm with brain&#x2013;computer interfacing</article-title>. <source>Procedia Technol.</source> <volume>24</volume>, <fpage>1089</fpage>&#x2013;<lpage>1096</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.protcy.2016.05.241</pub-id></mixed-citation></ref>
<ref id="ref63"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Temereanca</surname><given-names>S.</given-names></name> <name><surname>Simons</surname><given-names>D. J.</given-names></name></person-group> (<year>2003</year>). <article-title>Local field potentials and the encoding of whisker deflections by population firing synchrony in thalamic barreloids</article-title>. <source>J. Neurophysiol.</source> <volume>89</volume>, <fpage>2137</fpage>&#x2013;<lpage>2145</lpage>. doi: <pub-id pub-id-type="doi">10.1152/jn.00582.2002</pub-id>, <pub-id pub-id-type="pmid">12612019</pub-id></mixed-citation></ref>
<ref id="ref64"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Tiwari</surname><given-names>A.</given-names></name> <name><surname>Chaturvedi</surname><given-names>A.</given-names></name></person-group> (<year>2019</year>). &#x201C;<chapter-title>A multiclass EEG signal classification model using spatial feature extraction and XGBoost algorithm</chapter-title>,&#x201D; in <source>2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)</source>, (<publisher-loc>New York City, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>4169</fpage>&#x2013;<lpage>4175</lpage>.</mixed-citation></ref>
<ref id="ref65"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Vandevelde</surname><given-names>J. R.</given-names></name> <name><surname>Yang</surname><given-names>J.-W.</given-names></name> <name><surname>Albrecht</surname><given-names>S.</given-names></name> <name><surname>Lam</surname><given-names>H.</given-names></name> <name><surname>Kaufmann</surname><given-names>P.</given-names></name> <name><surname>Luhmann</surname><given-names>H. J.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Layer- and cell-type-specific differences in neural activity in mouse barrel cortex during a whisker detection task</article-title>. <source>Cereb. Cortex</source> <volume>33</volume>, <fpage>1361</fpage>&#x2013;<lpage>1382</lpage>. doi: <pub-id pub-id-type="doi">10.1093/cercor/bhac141</pub-id>, <pub-id pub-id-type="pmid">35417918</pub-id></mixed-citation></ref>
<ref id="ref66"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Vecchi</surname><given-names>E.</given-names></name> <name><surname>Posp&#x00ED;&#x0161;il</surname><given-names>L.</given-names></name> <name><surname>Albrecht</surname><given-names>S.</given-names></name> <name><surname>O'Kane</surname><given-names>T. J.</given-names></name> <name><surname>Horenko</surname><given-names>I.</given-names></name></person-group> (<year>2022</year>). <article-title>eSPA+: scalable entropy-optimal machine learning classification for small data problems</article-title>. <source>Neural Comput.</source> <volume>34</volume>, <fpage>1220</fpage>&#x2013;<lpage>1255</lpage>. doi: <pub-id pub-id-type="doi">10.1162/neco_a_01490</pub-id>, <pub-id pub-id-type="pmid">35344997</pub-id></mixed-citation></ref>
<ref id="ref67"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Wang</surname><given-names>X.</given-names></name> <name><surname>Magno</surname><given-names>M.</given-names></name> <name><surname>Cavigelli</surname><given-names>L.</given-names></name> <name><surname>Mahmud</surname><given-names>M.</given-names></name> <name><surname>Cecchetto</surname><given-names>C.</given-names></name> <name><surname>Vassanelli</surname><given-names>S.</given-names></name> <etal/></person-group>. (<year>2018a</year>). &#x201C;<chapter-title>Rat cortical layers classification extracting evoked local field potential images with implanted multi-electrode sensor</chapter-title>,&#x201D; in <source>2018 IEEE 20th International Conference on e-Health Networking, Applications and Services (Healthcom)</source>, (<publisher-loc>New York City, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>6</lpage>.</mixed-citation></ref>
<ref id="ref68"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Wang</surname><given-names>X.</given-names></name> <name><surname>Magno</surname><given-names>M.</given-names></name> <name><surname>Cavigelli</surname><given-names>L.</given-names></name> <name><surname>Mahmud</surname><given-names>M.</given-names></name> <name><surname>Cecchetto</surname><given-names>C.</given-names></name> <name><surname>Vassanelli</surname><given-names>S.</given-names></name> <etal/></person-group>. (<year>2018b</year>). &#x201C;<chapter-title>Embedded classification of local field potentials recorded from rat barrel cortex with implanted multi-electrode array</chapter-title>,&#x201D; in <source>2018 IEEE Biomedical Circuits and Systems Conference (BioCAS)</source>, (<publisher-loc>New York City, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>4</lpage>.</mixed-citation></ref>
<ref id="ref69"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Werner</surname><given-names>T.</given-names></name> <name><surname>Vianello</surname><given-names>E.</given-names></name> <name><surname>Bichler</surname><given-names>O.</given-names></name> <name><surname>Garbin</surname><given-names>D.</given-names></name> <name><surname>Cattaert</surname><given-names>D.</given-names></name> <name><surname>Yvert</surname><given-names>B.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Spiking neural networks based on OxRAM synapses for real-time unsupervised spike sorting</article-title>. <source>Front. Neurosci.</source> <volume>10</volume>:<fpage>474</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2016.00474</pub-id>, <pub-id pub-id-type="pmid">27857680</pub-id></mixed-citation></ref>
<ref id="ref70"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Won</surname><given-names>S. M.</given-names></name> <name><surname>Cai</surname><given-names>L.</given-names></name> <name><surname>Gutruf</surname><given-names>P.</given-names></name> <name><surname>Rogers</surname><given-names>J. A.</given-names></name></person-group> (<year>2023</year>). <article-title>Wireless and battery-free technologies for neuroengineering</article-title>. <source>Nat. Biomed. Eng.</source> <volume>7</volume>, <fpage>405</fpage>&#x2013;<lpage>423</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41551-021-00683-3</pub-id>, <pub-id pub-id-type="pmid">33686282</pub-id></mixed-citation></ref>
<ref id="ref71"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yamashita</surname><given-names>T.</given-names></name> <name><surname>Vavladeli</surname><given-names>A.</given-names></name> <name><surname>Pala</surname><given-names>A.</given-names></name> <name><surname>Galan</surname><given-names>K.</given-names></name> <name><surname>Crochet</surname><given-names>S.</given-names></name> <name><surname>Petersen</surname><given-names>S. S. A.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Diverse long-range axonal projections of excitatory layer 2/3 neurons in mouse barrel cortex</article-title>. <source>Front. Neuroanat.</source> <volume>12</volume>:<fpage>33</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnana.2018.00033</pub-id>, <pub-id pub-id-type="pmid">29765308</pub-id></mixed-citation></ref>
<ref id="ref72"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname><given-names>H.</given-names></name> <name><surname>Kwon</surname><given-names>S. E.</given-names></name> <name><surname>Severson</surname><given-names>K. S.</given-names></name> <name><surname>O&#x2019;Connor</surname><given-names>D. H.</given-names></name></person-group> (<year>2016</year>). <article-title>Origins of choice-related activity in mouse somatosensory cortex</article-title>. <source>Nat. Neurosci.</source> <volume>19</volume>, <fpage>127</fpage>&#x2013;<lpage>134</lpage>. doi: <pub-id pub-id-type="doi">10.1038/nn.4183</pub-id>, <pub-id pub-id-type="pmid">26642088</pub-id></mixed-citation></ref>
<ref id="ref73"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname><given-names>J.-W.</given-names></name> <name><surname>Prouvot</surname><given-names>P.-H.</given-names></name> <name><surname>Reyes-Puerta</surname><given-names>V.</given-names></name> <name><surname>St&#x00FC;ttgen</surname><given-names>M. C.</given-names></name> <name><surname>Stroh</surname><given-names>A.</given-names></name> <name><surname>Luhmann</surname><given-names>H. J.</given-names></name></person-group> (<year>2017</year>). <article-title>Optogenetic modulation of a minor fraction of parvalbumin-positive interneurons specifically affects spatiotemporal dynamics of spontaneous and sensory-evoked activity in mouse somatosensory cortex in vivo</article-title>. <source>Cereb. Cortex</source> <volume>27</volume>, <fpage>5784</fpage>&#x2013;<lpage>5803</lpage>. doi: <pub-id pub-id-type="doi">10.1093/cercor/bhx261</pub-id>, <pub-id pub-id-type="pmid">29040472</pub-id></mixed-citation></ref>
<ref id="ref74"><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Yang</surname><given-names>J.-W.</given-names></name> <name><surname>Prouvot</surname><given-names>P.-H.</given-names></name> <name><surname>Stroh</surname><given-names>A.</given-names></name> <name><surname>Luhmann</surname><given-names>H. J.</given-names></name></person-group> (<year>2018</year>). &#x201C;<chapter-title>Combining optogenetics with MEA, depth-resolved LFPs and assessing the scope of optogenetic network modulation</chapter-title>,&#x201D; in <source>Optogenetics: A Roadmap</source>, ed. <person-group person-group-type="editor"><name><surname>Stroh</surname><given-names>A.</given-names></name></person-group> (<publisher-loc>New York, NY</publisher-loc>: <publisher-name>Springer</publisher-name>), <fpage>133</fpage>&#x2013;<lpage>152</lpage>.</mixed-citation></ref>
<ref id="ref75"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yeganeh</surname><given-names>F.</given-names></name> <name><surname>Knauer</surname><given-names>B.</given-names></name> <name><surname>Guimar&#x00E3;es Backhaus</surname><given-names>R.</given-names></name> <name><surname>Yang</surname><given-names>J. W.</given-names></name> <name><surname>Stroh</surname><given-names>A.</given-names></name> <name><surname>Luhmann</surname><given-names>H. J.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Effects of optogenetic inhibition of a small fraction of parvalbumin-positive interneurons on the representation of sensory stimuli in mouse barrel cortex</article-title>. <source>Sci. Rep.</source> <volume>12</volume>:<fpage>19419</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-022-24156-y</pub-id>, <pub-id pub-id-type="pmid">36371511</pub-id></mixed-citation></ref>
<ref id="ref76"><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zuo</surname><given-names>Y.</given-names></name> <name><surname>Diamond</surname><given-names>M. E.</given-names></name></person-group> (<year>2019</year>). <article-title>Texture identification by bounded integration of sensory cortical signals</article-title>. <source>Curr. Biol.</source> <volume>29</volume>, <fpage>1425</fpage>&#x2013;<lpage>1435.e5</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cub.2019.03.017</pub-id>, <pub-id pub-id-type="pmid">31006571</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0002">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/275792/overview">Mehdi Adibi</ext-link>, Monash University, Australia</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0003">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/935239/overview">R&#x0103;zvan G&#x0103;m&#x0103;nu&#x021B;</ext-link>, Okinawa Institute of Science and Technology Graduate University, Japan</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1800614/overview">Anguo Zhang</ext-link>, Fuzhou University, China</p>
</fn>
</fn-group>
</back>
</article>