<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" article-type="research-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neurosci.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Neuroscience</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neurosci.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1662-453X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fnins.2025.1735027</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Sequential analysis and its applications to neuromorphic engineering</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name><surname>Mani</surname> <given-names>Shivaram</given-names></name>
<xref ref-type="aff" rid="aff1"/>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<uri xlink:href="https://loop.frontiersin.org/people/3260389"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Afshar</surname> <given-names>Saeed</given-names></name>
<xref ref-type="aff" rid="aff1"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<uri xlink:href="https://loop.frontiersin.org/people/95660"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Monk</surname> <given-names>Travis</given-names></name>
<xref ref-type="aff" rid="aff1"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x00026; editing</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
</contrib>
</contrib-group>
<aff id="aff1"><institution>International Centre for Neuromorphic Systems, The MARCS Institute, Western Sydney University</institution>, <city>Sydney, NSW</city>, <country country="AU">Australia</country></aff>
<author-notes>
<corresp id="c001"><label>&#x0002A;</label>Correspondence: Shivaram Mani, <email xlink:href="mailto:S.Mani@westernsydney.edu.au">S.Mani@westernsydney.edu.au</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-01-09">
<day>09</day>
<month>01</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2025</year>
</pub-date>
<volume>19</volume>
<elocation-id>1735027</elocation-id>
<history>
<date date-type="received">
<day>29</day>
<month>10</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>27</day>
<month>11</month>
<year>2025</year>
</date>
<date date-type="accepted">
<day>08</day>
<month>12</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2026 Mani, Afshar and Monk.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Mani, Afshar and Monk</copyright-holder>
<license>
<ali:license_ref start_date="2026-01-09">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Neuromorphic circuits operate by comparing fluctuating signals to thresholds. This operation underpins sensing and computation in both neuromorphic architectures and biological nervous systems. Rigorous analysis of such systems is rarely attempted because the statistical tools to study them are both inaccessible and largely unknown to the neuromorphic community.</p>
</sec>
<sec>
<title>Methods</title>
<p>We offer a gentle introduction to one such tool, sequential analysis, a classical framework that addresses a particular class of threshold-crossing problems. We define the formal problem analyzed in sequential analysis and present Abraham Wald&#x00027;s elegant methodology for solving it.</p>
</sec>
<sec>
<title>Results</title>
<p>We then apply this framework to three examples in neuromorphic engineering, demonstrating how it can serve as a benchmark, proxy model, and design tool. Our introduction is understandable without prior training in probability or statistics.</p>
</sec>
<sec>
<title>Discussion</title>
<p>Sequential analysis provides the statistical limits of circuit performance, tractable abstractions of complex circuit behavior, and constructive rules for circuit design. It establishes rigorous statistical baselines for evaluating hardware. It links low-level circuit parameters to observable dynamics, clarifying the computational role of neuromorphic architectures. By translating performance goals into optimal thresholds and design parameters, it offers principled prescriptions that go beyond empirical tuning.</p>
</sec></abstract>
<kwd-group>
<kwd>applied statistics</kwd>
<kwd>event camera</kwd>
<kwd>event sensor</kwd>
<kwd>hypothesis testing</kwd>
<kwd>likelihood ratio</kwd>
<kwd>neuromorphic computing</kwd>
<kwd>sequential analysis</kwd>
<kwd>threshold crossing</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement>
</funding-group>
<counts>
<fig-count count="7"/>
<table-count count="0"/>
<equation-count count="48"/>
<ref-count count="47"/>
<page-count count="14"/>
<word-count count="10238"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Neuromorphic Engineering</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="s1">
<label>1</label>
<title>Introduction</title>
<p>Neuromorphic engineering develops hardware and software systems based on the structure and function of nervous systems. Its principal goal is to design efficient, adaptive, and robust computation beyond conventional digital architectures (<xref ref-type="bibr" rid="B5">Christensen et al., 2022</xref>; <xref ref-type="bibr" rid="B14">Indiveri et al., 2011</xref>; <xref ref-type="bibr" rid="B25">Mead, 1989</xref>). Many neuromorphic devices encode and process information using discrete &#x0201C;spikes&#x0201D; or &#x0201C;events&#x0201D; (<xref ref-type="bibr" rid="B22">Mahowald, 1992</xref>). These spikes are typically generated when an internal variable, e.g., a voltage, crosses a threshold.</p>
<p>Threshold crossing problems are well-studied in statistics. They arise whenever a fluctuating process is compared to one or more boundaries (<xref ref-type="bibr" rid="B28">Monk et al., 2015</xref>, <xref ref-type="bibr" rid="B26">2024</xref>, <xref ref-type="bibr" rid="B27">2014</xref>; <xref ref-type="bibr" rid="B30">Monk and van Schaik, 2021</xref>, <xref ref-type="bibr" rid="B31">2022</xref>; <xref ref-type="bibr" rid="B10">Gold and Shadlen, 2007</xref>; <xref ref-type="bibr" rid="B29">Monk and van Schaik, 2020</xref>; <xref ref-type="bibr" rid="B40">Urzay et al., 2023</xref>). One example is a spiking neuron whose membrane potential exceeds the firing threshold (<xref ref-type="bibr" rid="B23">Mani et al., 2025</xref>; <xref ref-type="bibr" rid="B35">Shadlen and Shohamy, 2016</xref>; <xref ref-type="bibr" rid="B16">Kira et al., 2015</xref>). Another is a pixel in an event-based sensor whose voltage crosses &#x0201C;on&#x0201D; or &#x0201C;off&#x0201D; thresholds to generate events (<xref ref-type="bibr" rid="B9">Gallego et al., 2022</xref>). Statistics provides powerful tools for analyzing these problems (<xref ref-type="bibr" rid="B8">Doob, 1953</xref>; <xref ref-type="bibr" rid="B19">Lai, 2009b</xref>; <xref ref-type="bibr" rid="B38">Tartakovsky et al., 2014</xref>; <xref ref-type="bibr" rid="B39">Taylor and Karlin, 1984</xref>). But those tools remain largely inaccessible to the neuromorphic community. Much of the statistics literature is written in abstract mathematical language, which obscures its applicability to neuromorphic systems.</p>
<p>In this study, we introduce sequential analysis, a classical statistical framework for threshold crossing problems, to the neuromorphic community (<xref ref-type="bibr" rid="B41">Wald, 1944</xref>). Sequential analysis was pioneered by Abraham Wald (<xref ref-type="bibr" rid="B41">Wald, 1944</xref>, <xref ref-type="bibr" rid="B42">1947</xref>) to study optimal decision-making when data arrive over time. It provides exact results for decision accuracy, decision times, and optimal thresholds. We demonstrate that these results translate naturally to neuromorphic circuits. In the Methods, we present the formal problem of sequential analysis and its solution. Then, in the Results, we demonstrate how sequential analysis functions as a <bold>benchmark</bold>, <bold>proxy model</bold>, and <bold>design tool</bold> in three neuromorphic applications. We deliberately avoid technical jargon to make the derivations accessible to readers without a background in probability theory. All prerequisite materials for our derivations are available as <xref ref-type="supplementary-material" rid="SM1">Supplementary material</xref> online. Threshold crossing problems provide a rigorous and intuitive lens for benchmarking, interpreting, and designing neuromorphic architectures. By formalizing how evidence is accumulated toward a decision, sequential analysis provides a principled framework for interpreting the behavior and computational role of both neuromorphic circuits and biological neurons.</p>
</sec>
<sec sec-type="materials|methods" id="s2">
<label>2</label>
<title>Materials and methods</title>
<p>If any step in the following derivation is unclear, <xref ref-type="supplementary-material" rid="SM1">Supplementary material</xref> walks through the underlying principles in plain language. No prior background in statistics is required to understand this material; the Supplementary material covers all prerequisites.</p>
<p><xref ref-type="fig" rid="F1">Figure 1</xref> illustrates a sequential analysis problem (<xref ref-type="bibr" rid="B41">Wald, 1944</xref>; <xref ref-type="bibr" rid="B29">Monk and van Schaik, 2020</xref>). Let <italic>S</italic><sub><italic>t</italic></sub> represent the cumulative sum of <italic>t</italic> realizations of a random variable <italic>X</italic>&#x0007E;Pr(<italic>X</italic>). At each time step, we observe a new realization of <italic>X</italic> and add it to the sum of all previous realizations, <italic>S</italic><sub><italic>t</italic>&#x02212;1</sub>. Thus, <italic>S</italic><sub><italic>t</italic></sub> is a random walk that changes by <italic>X</italic> at each time step. The key assumption of sequential analysis is that <italic>X</italic> is independent and identically distributed (i.i.d.) for all time steps.</p>
<fig position="float" id="F1">
<label>Figure 1</label>
<caption><p>Schematic of a sequential analysis problem. <bold>(Left)</bold> We observe realizations of a random variable <italic>X</italic>, one per time step. <italic>X</italic> is assumed to be independent and identically distributed at every time step. In this example, Pr(<italic>X</italic>) is a normal distribution with mean &#x003BC; and variance &#x003C3;<sup>2</sup> (magenta trace). Three samples from the first three time steps, <italic>X</italic><sub>1</sub>, <italic>X</italic><sub>2</sub>, and <italic>X</italic><sub>3</sub>, are shown in cyan, green, and red, respectively. <bold>(Right)</bold> Sequential analysis considers the cumulative sum <italic>S</italic><sub><italic>t</italic></sub> of those realizations of <italic>X</italic>. It compares <italic>S</italic><sub><italic>t</italic></sub> to two constant absorbing barriers <italic>a</italic> and <italic>b</italic> (e.g., on and off thresholds). While <italic>b</italic> &#x0003C; <italic>S</italic><sub><italic>t</italic></sub> &#x0003C; <italic>a</italic>, we observe new realizations of <italic>X</italic> and continue adding them to the cumulative sum. We want to find the probabilities that the sum hits either barrier before the other and the distribution of the number of realizations <italic>T</italic> required to hit it. In this example, <italic>S</italic><sub><italic>T</italic></sub> &#x0003D; <italic>a</italic> and <italic>T</italic> &#x0003D; 10 time steps (black dot, upper-right of the panel).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-19-1735027-g0001.tif">
<alt-text content-type="machine-generated">A two-part diagram. On the left, a normal distribution curve with labeled points X1, X2, X3 at 1.23, -1.78, and -0.69. On the right, an absorbing random walk graph shows steps S0 to S3 with boundaries a = 3 and b = -2 over time T = 10.</alt-text>
</graphic>
</fig>
<p>The left panel in <xref ref-type="fig" rid="F1">Figure 1</xref> plots an example distribution Pr(<italic>X</italic>) as a normal distribution with mean &#x003BC; and variance &#x003C3;<sup>2</sup> (magenta trace). The left panel also shows three realizations drawn from Pr(<italic>X</italic>) at the first three time steps, shown in cyan, green, and red, respectively. The right panel in <xref ref-type="fig" rid="F1">Figure 1</xref> shows that we add each realization of <italic>X</italic> to the sum of all previous observations.</p>
<p>The right panel in <xref ref-type="fig" rid="F1">Figure 1</xref> also illustrates that sequential analysis confines the random walk <italic>S</italic><sub><italic>t</italic></sub> between two constant thresholds <italic>b</italic> and <italic>a</italic> (horizontal dashed black lines). In this example, <italic>b</italic> &#x0003D; &#x02212;2, <italic>a</italic> &#x0003D; 3, and the initial sum is <italic>S</italic><sub>0</sub> &#x0003D; 0. As long as <italic>b</italic> &#x0003C; <italic>S</italic><sub><italic>t</italic></sub> &#x0003C; <italic>a</italic>, we continue making new observations of <italic>X</italic> and adding them to <italic>S</italic><sub><italic>t</italic></sub> (gray, cyan, green, and red dots). When <italic>S</italic><sub><italic>t</italic></sub> crosses either threshold (e.g., threshold <italic>a</italic> at random time <italic>T</italic> &#x0003D; 10, black dot, right panel), we stop the random walk. One goal of sequential analysis is to obtain the probability of crossing one threshold before the other, i.e., Pr(<italic>S</italic><sub><italic>T</italic></sub> &#x0003D; <italic>a</italic>) and Pr(<italic>S</italic><sub><italic>T</italic></sub> &#x0003D; <italic>b</italic>). Another goal is to find the conditional distributions of <italic>T</italic>, Pr(<italic>T</italic>&#x02223;<italic>S</italic><sub><italic>T</italic></sub> &#x0003D; <italic>a</italic>) and Pr(<italic>T</italic>&#x02223;<italic>S</italic><sub><italic>T</italic></sub> &#x0003D; <italic>b</italic>).</p>
<p>Abraham Wald derived threshold crossing probabilities and conditional time distributions from a martingale (<xref ref-type="bibr" rid="B41">Wald, 1944</xref>; <xref ref-type="bibr" rid="B19">Lai, 2009b</xref>; <xref ref-type="bibr" rid="B8">Doob, 1953</xref>). Consider the conditional expectation:</p>
<disp-formula id="EQ1"><mml:math id="M1"><mml:mrow><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:msub><mml:mi>&#x003D5;</mml:mi><mml:mi>X</mml:mi></mml:msub><mml:msup><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mi>h</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x02212;</mml:mo><mml:mi>t</mml:mi></mml:mrow></mml:msup><mml:mo>&#x0007C;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mrow></mml:math></disp-formula>
<p>where &#x003D5;<sub><italic>X</italic></sub>(<italic>h</italic>) is the moment-generating function (MGF) of <italic>X</italic> and <italic>h</italic> is its (real) independent variable. Wald&#x00027;s analysis requires that the MGF be well-defined within a specific domain of <italic>h</italic>. It also requires regularity conditions, e.g., integrability and boundedness. In most practical applications of sequential analysis (including the examples we will consider here), these conditions hold.</p>
<p>We can quickly show that this conditional expectation is a martingale. Notice that &#x003D5;<sub><italic>X</italic></sub>(<italic>h</italic>) is a deterministic function. Moreover, <italic>t</italic> is just the number of time steps that have elapsed up to a given time. Since neither is random, we can pull them out of the conditional expectation:</p>
<disp-formula id="EQ2"><mml:math id="M2"><mml:mrow><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:msub><mml:mi>&#x003D5;</mml:mi><mml:mi>X</mml:mi></mml:msub><mml:msup><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mi>h</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x02212;</mml:mo><mml:mi>t</mml:mi></mml:mrow></mml:msup><mml:mo>&#x0007C;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mi>&#x003D5;</mml:mi><mml:mi>X</mml:mi></mml:msub><mml:msup><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mi>h</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x02212;</mml:mo><mml:mi>t</mml:mi></mml:mrow></mml:msup><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:mo>&#x0007C;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>
<p>Insert <italic>S</italic><sub><italic>t</italic></sub> &#x0003D; <italic>S</italic><sub><italic>t</italic>&#x02212;1</sub> &#x0002B; <italic>X</italic><sub><italic>t</italic></sub> in the right-hand side:</p>
<disp-formula id="EQ3"><mml:math id="M3"><mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mi>&#x003D5;</mml:mi><mml:mi>X</mml:mi></mml:msub><mml:msup><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mi>h</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x02212;</mml:mo><mml:mi>t</mml:mi></mml:mrow></mml:msup><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:mo>&#x0007C;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>
<p>Given <italic>S</italic><sub><italic>t</italic>&#x02212;1</sub>, the term exp(<italic>S</italic><sub><italic>t</italic>&#x02212;1</sub><italic>h</italic>) is not random, so pull it out:</p>
<disp-formula id="EQ4"><mml:math id="M4"><mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mi>&#x003D5;</mml:mi><mml:mi>X</mml:mi></mml:msub><mml:msup><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mi>h</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x02212;</mml:mo><mml:mi>t</mml:mi></mml:mrow></mml:msup><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:mo>&#x0007C;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>
<p>Since the <italic>X</italic> are i.i.d., <italic>X</italic><sub><italic>t</italic></sub> is independent of <italic>S</italic><sub><italic>t</italic> &#x02212; 1</sub>:</p>
<disp-formula id="EQ5"><mml:math id="M5"><mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mi>&#x003D5;</mml:mi><mml:mi>X</mml:mi></mml:msub><mml:msup><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mi>h</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x02212;</mml:mo><mml:mi>t</mml:mi></mml:mrow></mml:msup><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>
<p>Recognize the expectation as the MGF of <italic>X</italic> and simplify:</p>
<disp-formula id="EQ6"><mml:math id="M6"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mi>X</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Comparing this expression to our original conditional expectation, we observe that it forms a martingale:</p>
<disp-formula id="EQ7"><mml:math id="M7"><mml:mrow><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo> <mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:msub><mml:mi>&#x003D5;</mml:mi><mml:mi>X</mml:mi></mml:msub><mml:msup><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mi>h</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x02212;</mml:mo><mml:mi>t</mml:mi></mml:mrow></mml:msup><mml:mo>&#x0007C;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow> <mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:msub><mml:mi>&#x003D5;</mml:mi><mml:mi>X</mml:mi></mml:msub><mml:msup><mml:mrow><mml:mo stretchy='false'>(</mml:mo><mml:mi>h</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x02212;</mml:mo><mml:mo stretchy='false'>(</mml:mo><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn><mml:mo stretchy='false'>)</mml:mo></mml:mrow></mml:msup><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>
<p>Next, we write this martingale as a conservation statement. Take the expectations of both sides and apply the law of total expectation:</p>
<disp-formula id="EQ8"><mml:math id="M8"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mi>X</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mi>t</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mi>X</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>By induction:</p>
<disp-formula id="EQ9"><mml:math id="M9"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mi>X</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mi>t</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mi>X</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msup></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>assuming that <italic>S</italic><sub>0</sub> is known (i.e., not random) and <italic>t</italic> begins at 0. This equation states that the expectation of the quantity <inline-formula><mml:math id="M10"><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mi>X</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mi>t</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula> is conserved throughout this stochastic process.</p>
<p>Doob&#x00027;s optional stopping theorem states that a randomly stopped martingale is still a martingale (<xref ref-type="bibr" rid="B8">Doob, 1953</xref>). Thus, insert the random stopping time <italic>T</italic> for <italic>t</italic>:</p>
<disp-formula id="EQ10"><mml:math id="M11"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mi>X</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mi>T</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(1)</label></disp-formula>
<p><xref ref-type="disp-formula" rid="EQ10">Equation 1</xref> is known as the fundamental identity in sequential analysis (<xref ref-type="bibr" rid="B41">Wald, 1944</xref>; <xref ref-type="bibr" rid="B38">Tartakovsky et al., 2014</xref>). Wald derived threshold crossing probabilities and conditional time distributions from it (<xref ref-type="bibr" rid="B41">Wald, 1944</xref>, <xref ref-type="bibr" rid="B42">1947</xref>). The reason we can extract so much threshold crossing information from it is that it is valid for all values of <italic>h</italic>. We extract our desired quantities by choosing special values of <italic>h</italic> and inserting them into <xref ref-type="disp-formula" rid="EQ10">Equation 1</xref> (<xref ref-type="bibr" rid="B29">Monk and van Schaik, 2020</xref>).</p>
<p><xref ref-type="fig" rid="F2">Figure 2</xref> visualizes the special values of <italic>h</italic> that we insert into the martingale. First, we calculate the probabilities of crossing the threshold. The left panel of <xref ref-type="fig" rid="F2">Figure 2</xref> plots the MGF of <italic>X</italic>, given the same distribution as shown in the left panel of <xref ref-type="fig" rid="F1">Figure 1</xref>. Given weak assumptions (<xref ref-type="bibr" rid="B41">Wald, 1944</xref>), &#x003D5;<sub><italic>X</italic></sub>(<italic>h</italic>) is convex and crosses 1 at exactly two values of <italic>h</italic> (cyan lines and markers). Since all MGFs equal 1 at <italic>h</italic> &#x0003D; 0 by definition, we discard one of those crossings because it provides no useful information (square cyan marker, left panel <xref ref-type="fig" rid="F2">Figure 2</xref>). The second crossing occurs at a nontrivial value <italic>h</italic> &#x0003D; <italic>h</italic><sub>0</sub>&#x02260;0 (circle cyan marker). Inserting <italic>h</italic><sub>0</sub> into <xref ref-type="disp-formula" rid="EQ10">Equation 1</xref>:</p>
<disp-formula id="EQ11"><mml:math id="M12"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mi>X</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mi>T</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>For compact notation, let &#x003B1; &#x02261; Pr(<italic>S</italic><sub><italic>T</italic></sub> &#x0003D; <italic>a</italic>) and &#x003B2; &#x02261; Pr(<italic>S</italic><sub><italic>T</italic></sub> &#x0003D; <italic>b</italic>). Split the expectation, conditional on crossing either threshold first:</p>
<disp-formula id="EQ12"><mml:math id="M13"><mml:mrow><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mi>T</mml:mi></mml:msub><mml:msub><mml:mi>h</mml:mi><mml:mn>0</mml:mn></mml:msub></mml:mrow></mml:msup></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mi>T</mml:mi></mml:msub><mml:msub><mml:mi>h</mml:mi><mml:mn>0</mml:mn></mml:msub></mml:mrow></mml:msup><mml:mo>&#x0007C;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mi>T</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mi>a</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mi>&#x003B1;</mml:mi><mml:mo>+</mml:mo><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mi>T</mml:mi></mml:msub><mml:msub><mml:mi>h</mml:mi><mml:mn>0</mml:mn></mml:msub></mml:mrow></mml:msup><mml:mo>&#x0007C;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mi>T</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mi>&#x003B2;</mml:mi><mml:mo>=</mml:mo><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:msub><mml:mi>h</mml:mi><mml:mn>0</mml:mn></mml:msub></mml:mrow></mml:msup><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>
<p>Given that <italic>S</italic><sub><italic>T</italic></sub> is <italic>a</italic> or <italic>b</italic>, the terms in the conditional expectations are just constants (i.e., not random):</p>
<disp-formula id="EQ13"><mml:math id="M14"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:mi>&#x003B1;</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>b</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:mi>&#x003B2;</mml:mi><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>The process will cross either threshold in finite time (<xref ref-type="bibr" rid="B41">Wald, 1944</xref>), so we can insert &#x003B2; &#x0003D; 1&#x02212;&#x003B1; and rearrange:</p>
<disp-formula id="EQ14"><mml:math id="M15"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:mi>&#x003B1;</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:mo>-</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>b</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:mo>-</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>b</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup></mml:mrow></mml:mfrac><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(2)</label></disp-formula>
<p>The right panel of <xref ref-type="fig" rid="F2">Figure 2</xref> shows the values of <italic>h</italic> that we insert into <xref ref-type="disp-formula" rid="EQ10">Equation 1</xref> to obtain the conditional characteristic functions (CCFs) of threshold crossing times. Under weak assumptions (<xref ref-type="bibr" rid="B41">Wald, 1944</xref>), &#x003D5;<sub><italic>X</italic></sub>(<italic>h</italic>) has two real-valued crossings of a horizontal line in the <italic>neighborhood</italic> of 1, and not just at 1 (magenta lines and markers, left panel <xref ref-type="fig" rid="F2">Figure 2</xref>). So its logarithm has two real roots in <italic>h</italic> in that neighborhood. Then for imaginary &#x003C4;, &#x02212;log&#x003D5;<sub><italic>X</italic></sub>(<italic>h</italic>) &#x0003D; &#x003C4; has two complex roots <italic>h</italic><sub>1</sub>(&#x003C4;) and <italic>h</italic><sub>2</sub>(&#x003C4;):</p>
<disp-formula id="EQ15"><mml:math id="M16"><mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mi>X</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mi>X</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mi>&#x003C4;</mml:mi></mml:mrow></mml:msup><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>
<p>Insert <italic>h</italic><sub>1</sub>(&#x003C4;) into <xref ref-type="disp-formula" rid="EQ10">Equation 1</xref>:</p>
<disp-formula id="EQ16"><mml:math id="M17"><mml:mrow><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mi>X</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mi>T</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>&#x003C4;</mml:mi><mml:mi>T</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>
<p>Split the expectation, conditional on hitting either threshold first:</p>
<disp-formula id="EQ17"><mml:math id="M18"><mml:mrow><mml:mi>&#x003B1;</mml:mi><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mi>T</mml:mi></mml:msub><mml:msub><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:msup><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mi>&#x003C4;</mml:mi><mml:mi>T</mml:mi></mml:mrow></mml:msup><mml:mo>&#x0007C;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mi>T</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mi>a</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mo stretchy='false'>(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x02212;</mml:mo><mml:mi>&#x003B1;</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mi>T</mml:mi></mml:msub><mml:msub><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:msup><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mi>&#x003C4;</mml:mi><mml:mi>T</mml:mi></mml:mrow></mml:msup><mml:mo>&#x0007C;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mi>T</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:msub><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:msup><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>
<p>Given which threshold was crossed first, exp(<italic>S</italic><sub><italic>T</italic></sub><italic>h</italic><sub>1</sub>) is not random. Pull it out of the conditional expectations:</p>
<disp-formula id="EQ18"><mml:math id="M19"><mml:mrow><mml:mi>&#x003B1;</mml:mi><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mi>a</mml:mi><mml:msub><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:msup><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mi>&#x003C4;</mml:mi><mml:mi>T</mml:mi></mml:mrow></mml:msup><mml:mo>&#x0007C;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mi>T</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mi>a</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mo stretchy='false'>(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x02212;</mml:mo><mml:mi>&#x003B1;</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mi>b</mml:mi><mml:msub><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:msup><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mi>&#x003C4;</mml:mi><mml:mi>T</mml:mi></mml:mrow></mml:msup><mml:mo>&#x0007C;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mi>T</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:msub><mml:mi>h</mml:mi><mml:mn>1</mml:mn></mml:msub></mml:mrow></mml:msup><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>
<p>Recognize the conditional expectations as the CCFs of <italic>T</italic>, &#x003C8;<sub><italic>T</italic>|<italic>a</italic></sub>(&#x003C4;) and &#x003C8;<sub><italic>T</italic>|<italic>b</italic></sub>(&#x003C4;):</p>
<disp-formula id="EQ19"><mml:math id="M20"><mml:mrow><mml:mi>&#x003B1;</mml:mi><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>a</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mi>&#x003B1;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>b</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>
<p>Now, insert <italic>h</italic><sub>2</sub>(&#x003C4;) into <xref ref-type="disp-formula" rid="EQ10">Equation 1</xref>, repeat the same argument, and we have a system of two equations:</p>
<disp-formula id="EQ20"><mml:math id="M21"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mi>&#x003B1;</mml:mi><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>a</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mi>&#x003B1;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>b</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:mo>;</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mi>&#x003B1;</mml:mi><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>a</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mi>&#x003B1;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>b</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Since we have two equations, we can rearrange for both CCFs:</p>
<disp-formula id="EQ21"><mml:math id="M22"><mml:mtable class="eqnarray" columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>a</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>b</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:mo>-</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>b</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mi>&#x003B1;</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>b</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:mo>-</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>b</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mfrac><mml:mo>;</mml:mo></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo 
stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:mo>-</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mi>&#x003B1;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mrow><mml:mo 
stretchy="false">(</mml:mo><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>b</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:mo>-</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>b</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mfrac><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math><label>(3)</label></disp-formula>
<p>By the law of total expectation, the marginal CF of <italic>T</italic> is as follows:</p>
<disp-formula id="EQ22"><mml:math id="M24"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>&#x003B1;</mml:mi><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>a</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>&#x0002B;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mi>&#x003B1;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>When &#x1D53C;[<italic>X</italic>] &#x0003D; 0, <xref ref-type="disp-formula" rid="EQ14">Equation 2</xref> is undefined. In this special case, &#x003D5;<sub><italic>X</italic></sub>(<italic>h</italic>) only crosses 1 once, at the trivial value <italic>h</italic> &#x0003D; <italic>h</italic><sub>0</sub> &#x0003D; 0. We circumvent this issue by taking the limit of <xref ref-type="disp-formula" rid="EQ14">Equation 2</xref> as <italic>h</italic><sub>0</sub> &#x02192; 0:</p>
<disp-formula id="EQ23"><mml:math id="M25"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mstyle displaystyle="true"><mml:munder class="msub"><mml:mrow><mml:mo class="qopname">lim</mml:mo></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>&#x02192;</mml:mo><mml:mn>0</mml:mn></mml:mrow></mml:munder></mml:mstyle><mml:mi>&#x003B1;</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mo>-</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:mfrac><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Alternatively, we can find a simpler martingale that allows us to find &#x003B1; in this special case:</p>
<disp-formula id="EQ24"><mml:math id="M26"><mml:mrow><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>&#x0007C;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>X</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>&#x0007C;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mi>X</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x02212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo></mml:mrow></mml:math></disp-formula>
<p>and we have another martingale. Writing it as a conservation statement and invoking Doob&#x00027;s optional stopping theorem:</p>
<disp-formula id="EQ25"><mml:math id="M27"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Split the expectation conditional on which threshold was crossed:</p>
<disp-formula id="EQ26"><mml:math id="M28"><mml:mrow><mml:mi>&#x003B1;</mml:mi><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mi>T</mml:mi></mml:msub><mml:mo>&#x0007C;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mi>T</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mi>a</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mo stretchy='false'>(</mml:mo><mml:mn>1</mml:mn><mml:mo>&#x02212;</mml:mo><mml:mi>&#x003B1;</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mi>S</mml:mi><mml:mi>T</mml:mi></mml:msub><mml:mo>&#x0007C;</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mi>T</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mn>0</mml:mn></mml:msub><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>
<p>Evaluate the conditional expectations and rearrange:</p>
<disp-formula id="EQ27"><mml:math id="M29"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mi>&#x003B1;</mml:mi><mml:mi>a</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mi>&#x003B1;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mi>b</mml:mi><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mtext>&#x02003;</mml:mtext><mml:mo>&#x02192;</mml:mo><mml:mtext>&#x02003;</mml:mtext><mml:mi>&#x003B1;</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msub><mml:mrow><mml:mi>S</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:mi>b</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mo>-</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:mfrac><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>and we recover the same expression.</p>
<fig position="float" id="F2">
<label>Figure 2</label>
<caption><p>Special values of <italic>h</italic> extract threshold crossing probabilities and times from Wald&#x00027;s martingale. <bold>(Left)</bold> Under weak assumptions, the MGF of a random variable is convex and crosses 1 at exactly two values of <italic>h</italic> (cyan markers and lines). All MGFs cross 1 at <italic>h</italic> &#x0003D; 0 (square cyan marker), so that crossing is trivial and we discard it. The other crossing is nontrivial, and we use it to compute threshold-crossing probabilities. Furthermore, under those weak assumptions, the MGF crosses one twice in the <italic>neighborhood</italic> of 1 (magenta markers and lines). Then the logarithm of the MGF has two real roots in <italic>h</italic> in the neighborhood of 0. So for imaginary &#x003C4;, the logarithm of the MGF has two complex roots. <bold>(Right)</bold> Those two complex roots <italic>h</italic><sub>1</sub>(&#x003C4;) and <italic>h</italic><sub>2</sub>(&#x003C4;) for the MGF in the left panel. We use them to calculate conditional threshold crossing times. Solid traces represent one root, and dashed traces represent the other. Red and gray traces represent the real and imaginary parts of those roots, respectively. When &#x003C4; &#x0003D; 0, those complex roots pass through the MGF crossings of 1 (cyan markers and lines).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-19-1735027-g0002.tif">
<alt-text content-type="machine-generated">Two graphs are displayed. The left graph shows a yellow parabolic curve representing the function \(\phi_X(h) = \exp(\mu h + \sigma^2 h^2 / 2) \) with annotated points at different \(h_0 \) values. The right graph depicts two intersecting curves, one solid and one dashed, representing \(h_1(\tau) \) and \(h_2(\tau) \) over a complex plane with values ranging from \(-0.25i\) to \(0.25i\), with key points marked at \(h_0 \).</alt-text>
</graphic>
</fig>
<p>Wald&#x00027;s analysis is exact when <italic>S</italic><sub><italic>T</italic></sub> hits either threshold exactly, i.e., when there is never any threshold overshoot. Generally speaking, <italic>S</italic><sub><italic>T</italic></sub> can overshoot thresholds more when the variance of <italic>X</italic> is high. If the threshold overshoot is large with respect to the distance between the thresholds, then Wald&#x00027;s analysis can yield inaccurate estimates of threshold crossing probabilities and times. The literature reports techniques to estimate and/or bound threshold overshoot in order to obtain more accurate approximations of these quantities (<xref ref-type="bibr" rid="B18">Lai, 2009a</xref>). Since this paper is an introduction to sequential analysis, these techniques are beyond the scope of this study. We can often reduce threshold overshoot by defining the time step to be extremely brief so that <italic>S</italic><sub><italic>t</italic></sub> changes only slightly from one time step to the next. This approach is valid for many neuromorphic applications, in which a continuous quantity fluctuates between thresholds.</p>
</sec>
<sec sec-type="results" id="s3">
<label>3</label>
<title>Results</title>
<p>We now show how to apply sequential analysis in neuromorphic engineering, including worked examples.</p>
<sec>
<label>3.1</label>
<title>Characterizing idealized event sensor pixel noise</title>
<p><xref ref-type="fig" rid="F3">Figure 3</xref> illustrates key differences between conventional and neuromorphic vision sensors. Panel A is a schematic of a visual scene representation by a conventional camera. Conventional cameras discretize an analog signal (i.e., incident light, top plot) uniformly across space (by pixels) and time (by frames). They output a series of digitized frames comprising pixel values that describe the light they observed at uniformly spaced moments in time (panel A, bottom plot). Panel B shows how neuromorphic vision sensors (e.g., event sensors) represent a scene. Event sensor pixels generate events in response to changes in light intensity, typically by monitoring changes in log intensity or voltage (<xref ref-type="bibr" rid="B9">Gallego et al., 2022</xref>). When the integrated change exceeds a certain positive threshold, an &#x02018;on event&#x02019; is generated (green shaded area and raster plot, panel B). Conversely, if the change is sufficiently negative, an &#x02018;off event&#x02019; is triggered (red shaded area and raster plot, panel B). Output events can occur within microseconds of a change in light intensity.</p>
<fig position="float" id="F3">
<label>Figure 3</label>
<caption><p>Event sensors represent visual data differently than conventional cameras. <bold>(A)</bold> Conventional sensors convert an analog signal (top plot) into a series of frames that uniformly discretize the signal in space and time (bottom plot). <bold>(B)</bold> Neuromorphic sensors efficiently represent that analog signal (top plot) with precisely timed events (raster plots, bottom) when it increases (ON, green) or decreases (OFF, red). <bold>(C)</bold> Simplified circuit diagram of a neuromorphic vision sensor pixel. <bold>(D)</bold> Illustrative log-intensity of photons over time log &#x003BB;(<italic>t</italic>) on the highlighted pixel in <bold>(E)</bold>. <bold>(F)</bold> Corresponding voltage trajectory <italic>V</italic>(<italic>t</italic>) (cyan trace) for the highlighted neuromorphic pixel in panel G. <italic>V</italic>(<italic>t</italic>) approximates the change in log photon intensity from <bold>(D)</bold>. <italic>V</italic>(<italic>t</italic>) is compared to on (green) and off (red) thresholds. Threshold crossings generate events that mark the microsecond at which the log photon intensity changed beyond the threshold. When either threshold is crossed, <italic>V</italic>(<italic>t</italic>) is reset to the other threshold. Collectively, the panels demonstrate key advantages of neuromorphic sensing: microsecond temporal resolution, sparse output proportional to scene dynamics, and in-pixel analog preprocessing that reduces bandwidth and energy consumption.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-19-1735027-g0003.tif">
<alt-text content-type="machine-generated">Diagram comparing conventional sensory encoding and neuromorphic sensing. Panel A shows conventional encoding with smooth stimulus and a stepwise digital signal. Panel B illustrates neuromorphic sensing with ON and OFF spikes. Panel C presents an event-based sensor pixel with light input and ON/OFF events. Panel D graphs log stimulus, panel E shows an image of a person walking outside, and panel F displays voltage versus time for ON/OFF spikes. Panel G illustrates processed image data with highlighted features.</alt-text>
</graphic>
</fig>
<p><xref ref-type="fig" rid="F3">Figure 3C</xref> is a diagram illustrating how an event sensor pixel achieves such remarkable temporal precision. A photodiode transduces photons and charges a capacitor. The charging current is proportional to the change in the log intensity (log&#x003BB;(<italic>t</italic>)) of the incoming photons. Panel D shows a hypothetical log intensity of photons incident on a single pixel from the image of panel E. Panel F illustrates that the event sensor pixel then compares the voltage across the capacitor to on (green horizontal line) and off (red horizontal line) thresholds. When the voltage exceeds either threshold, the pixel generates a corresponding event. The green and red raster plots in panel F present hypothetical event output of a single pixel from the neuromorphic &#x02018;image&#x02019; in panel G. The circuitry implementing this comparison and event generation is not shown in panel C. Event sensors can output data at different rates depending on the scene that they are filming. If nothing in the scene is moving with respect to the sensor, its output events are sparse, and its resulting output data rate can be very low, and vice versa. For example, the background of the scene illustrated in panel E is not moving with respect to the event sensor. Thus, event sensor pixels with a static background in their field of view will output few events, as shown in panel G. In contrast, conventional cameras output the same amount of data regardless of the scene.</p>
<p>Theoretically, if nothing in a scene changes, the event sensor should not output any events. However, in practice, event pixels generate &#x02018;noisy&#x02019; events even in a static scene with no true intensity change (<xref ref-type="bibr" rid="B34">Padala et al., 2018</xref>). Since there is no change in intensity, the light incident on a pixel during a time step does not vary (regardless of how we define a time step). Therefore, the key assumption of sequential analysis (i.i.d. time steps) is met. We can then use sequential analysis to characterize the statistics of those noisy events, at least for idealized pixels. Even if sequential analysis does not accurately model the circuitry of a real event sensor pixel, it provides a benchmark for comparing the statistical properties of noisy events from a real event sensor.</p>
<p>The top plot in <xref ref-type="fig" rid="F4">Figure 4</xref> considers an idealized event sensor pixel as a sequential analysis problem. Let the cumulative sum <italic>S</italic><sub><italic>t</italic></sub> represent the voltage of the pixel <italic>V</italic><sub><italic>t</italic></sub> at time <italic>t</italic>. Let the change in the sum <italic>X</italic> be the change in the pixel voltage &#x00394;<italic>V</italic> on a time step. To keep our calculations simple, let <inline-formula><mml:math id="M30"><mml:mo>&#x00394;</mml:mo><mml:mi>V</mml:mi><mml:mo>&#x0007E;</mml:mo><mml:mrow><mml:mi mathvariant="script">N</mml:mi></mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003BC;</mml:mi><mml:mo>,</mml:mo><mml:mi>&#x003C3;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:math></inline-formula>. The thresholds <italic>a</italic> and <italic>b</italic> directly map to the on and off thresholds of the pixel. The top plot also shows two example realizations of the voltage path. One crosses the on threshold first (red trace and dashed line), and the other crosses the off threshold first (blue trace and dashed line). The waiting times for both voltage-path realizations are shown on the <italic>x</italic>-axis.</p>
<fig position="float" id="F4">
<label>Figure 4</label>
<caption><p>Noisy events from idealized event sensor pixels receiving constant light intensity is a sequential analysis problem. <bold>(Top row)</bold> Schematic of a single event sensor pixel&#x00027;s voltage fluctuating between an on and off threshold. We assume that the pixel receives constant light intensity and that the change in pixel voltage on a time step is i.i.d. We want to find the probability that the pixel generates an on or off event. The conditional waiting time distributions are used to do so. Two example voltage-path realizations are shown: one ultimately crosses the on threshold (red trace, upper dashed line) and the other crosses the off threshold (blue trace, lower dashed line). The waiting times of both paths are shown on the <italic>x</italic>-axis. <bold>(Middle row)</bold> Theoretical (solid traces) and simulation (dashed traces) CCFs and threshold crossing probabilities are practically identical. Real (pink/red) and imaginary (gray/black) parts of CCFs are plotted separately. <bold>(Bottom row)</bold> Analogous to the middle row, but with conditional probability distributions instead of CCFs. The red and blue traces are effectively inverse Fourier transforms of the CCFs above. The gold histograms are simulation results for conditional waiting times at threshold crossings. The two voltage-path realizations from the top row are plotted as samples from those distributions (red and blue bars). Simulation curves reflect 100,000 independent trials; sampling variance is below the line thickness at the plotted scale.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-19-1735027-g0004.tif">
<alt-text content-type="machine-generated">Graphical illustration showing pixel voltage and probability distribution analysis. The top graph displays pixel voltage variations over time, divided into two phases: on (pink) and off (blue). Two middle graphs compare theoretical and simulated data for on and off states using complex functions of &#x003C4;, highlighting agreement between theory and simulations. The bottom graphs depict probability distributions, showing Pr(T|V_T = on) peaking around 58,102 and Pr(T|V_T = off) peaking around 33,543. Yellow bars in histograms represent distributions with marked peaks indicating key values for each state.</alt-text>
</graphic>
</fig>
<p>First, we calculate the probability that a pixel generates an on or off event first, i.e., threshold crossing probabilities. The MGF of &#x00394;<italic>V</italic> is as follows:</p>
<disp-formula id="EQ28"><mml:math id="M31"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mo>&#x00394;</mml:mo><mml:mi>V</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo class="qopname">exp</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003BC;</mml:mi><mml:mi>h</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:msup><mml:mrow><mml:mi>&#x003C3;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>/</mml:mo><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>This MGF was plotted in the left panel of <xref ref-type="fig" rid="F2">Figure 2</xref>. &#x003D5;<sub>&#x00394;<italic>V</italic></sub>(<italic>h</italic>) crosses one twice, once at zero and again at a non-trivial value:</p>
<disp-formula id="EQ29"><mml:math id="M32"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mo class="qopname">exp</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003BC;</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>&#x0002B;</mml:mo><mml:msup><mml:mrow><mml:mi>&#x003C3;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:msubsup><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>/</mml:mo><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mtext>&#x02003;</mml:mtext><mml:mo>&#x02192;</mml:mo><mml:mtext>&#x02003;</mml:mtext><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mo>-</mml:mo><mml:mn>2</mml:mn><mml:mi>&#x003BC;</mml:mi><mml:mo>/</mml:mo><mml:msup><mml:mrow><mml:mi>&#x003C3;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Inserting <italic>h</italic><sub>0</sub> into <xref ref-type="disp-formula" rid="EQ14">Equation 2</xref>, we determine the probability that the pixel will generate an on event before an off event. Put in some example parameter values <italic>a</italic> &#x0003D; 3, <italic>b</italic> &#x0003D; &#x02212;2, <italic>V</italic><sub>0</sub> &#x0003D; 0, &#x003BC; &#x0003D; 10<sup>&#x02212;5</sup>, and &#x003C3; &#x0003D; 10<sup>&#x02212;2</sup>:</p>
<disp-formula id="EQ30"><mml:math id="M33"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mi>&#x003B1;</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:msub><mml:mrow><mml:mi>V</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:mo>-</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>b</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:mo>-</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>b</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup></mml:mrow></mml:mfrac><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn><mml:mo>.</mml:mo><mml:mn>4</mml:mn></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mn>0</mml:mn><mml:mo>.</mml:mo><mml:mn>6</mml:mn></mml:mrow></mml:msup><mml:mo>-</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn><mml:mo>.</mml:mo><mml:mn>4</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mfrac><mml:mo>&#x02248;</mml:mo><mml:mo>.</mml:mo><mml:mn>522</mml:mn><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Next, we calculate waiting-time CCFs for noisy on- and off-events. For imaginary &#x003C4;, &#x02212;log&#x003D5;<sub>&#x00394;<italic>V</italic></sub>(<italic>h</italic>) &#x0003D; &#x003C4; has two complex roots <italic>h</italic><sub>1</sub>(&#x003C4;) and <italic>h</italic><sub>2</sub>(&#x003C4;) (right panel, <xref ref-type="fig" rid="F2">Figure 2</xref>). Obtaining their expressions is straightforward:</p>
<disp-formula id="EQ31"><mml:math id="M34"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mo>-</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003BC;</mml:mi><mml:mi>h</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:msup><mml:mrow><mml:mi>&#x003C3;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>/</mml:mo><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>&#x003C4;</mml:mi><mml:mtext>&#x02003;</mml:mtext><mml:mo>&#x02192;</mml:mo><mml:mtext>&#x02003;</mml:mtext><mml:mi>h</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mo>-</mml:mo><mml:mi>&#x003BC;</mml:mi><mml:mo>&#x000B1;</mml:mo><mml:msqrt><mml:mrow><mml:msup><mml:mrow><mml:mi>&#x003BC;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>-</mml:mo><mml:mn>2</mml:mn><mml:msup><mml:mrow><mml:mi>&#x003C3;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mi>&#x003C4;</mml:mi></mml:mrow></mml:msqrt></mml:mrow><mml:mrow><mml:msup><mml:mrow><mml:mi>&#x003C3;</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mfrac><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>One root, say <italic>h</italic><sub>1</sub>(&#x003C4;), is given by one sign in the &#x000B1; symbol, say &#x0002B;. The other root <italic>h</italic><sub>2</sub>(&#x003C4;) is given by the other sign. The right panel in <xref ref-type="fig" rid="F2">Figure 2</xref> plots these two complex roots. Inserting them into <xref ref-type="disp-formula" rid="EQ21">Equation 3</xref>, we find the waiting time CCFs of the idealized pixel&#x00027;s noisy events.</p>
<p>The middle row of <xref ref-type="fig" rid="F4">Figure 4</xref> plots these CCFs for the parameter values stated above (thick solid traces). Pink and gray traces plot the real and imaginary parts of the CCFs, respectively. We also print the threshold crossing probabilities &#x003B1; and 1&#x02212;&#x003B1;. These panels also compare simulation results with theoretical results. We ran 100,000 independent simulations of this idealized event sensor pixel with the parameter values stated above. We saved which threshold was crossed first and how many time steps the process took to reach it. Then we computed the Fourier transforms of our resulting waiting-time distributions to compare them with our theoretical CCFs. The reported numbers in the panels indicate very strong agreement between our calculated and simulated threshold-crossing probabilities. The dashed red and black traces in the middle panels show that our simulated CCFs very closely match our theoretical CCFs. Since we assumed that the pixel voltage changes only very slightly at each time step, the voltage is extremely unlikely to overshoot either threshold appreciably. So Wald&#x00027;s analysis is practically exact, and we ran enough simulations to converge on his solution.</p>
<p>The bottom row of <xref ref-type="fig" rid="F4">Figure 4</xref> shows that we can recover conditional probability distributions of <italic>T</italic> from those CCFs. For example, we find Pr(<italic>T</italic>|<italic>V</italic><sub><italic>T</italic></sub> &#x0003D; on) from &#x003C8;<sub><italic>T</italic>|on</sub>(&#x003C4;) via the inverse Fourier transform:</p>
<disp-formula id="EQ32"><mml:math id="M35"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mtext>Pr</mml:mtext><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mi>V</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mtext>on</mml:mtext></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn><mml:mi>&#x003C0;</mml:mi></mml:mrow></mml:mfrac><mml:mstyle displaystyle="true"><mml:msub><mml:mrow><mml:mo class="qopname">&#x0222B;</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x0211D;</mml:mo></mml:mrow></mml:msub></mml:mstyle><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mi>&#x003C4;</mml:mi><mml:mi>T</mml:mi></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mtext>on</mml:mtext></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mi>d</mml:mi><mml:mi>&#x003C4;</mml:mi><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Pr(<italic>T</italic>|<italic>V</italic><sub><italic>T</italic></sub> &#x0003D; off) is found analogously. The red and blue traces in the bottom row show the conditional probability distributions calculated from the CCFs in the middle row. The gold bars are histograms of our simulation results. Again, we see a very strong match between Wald&#x00027;s analysis and our simulation results. The two example voltage-path realizations from the top row are also displayed as samples from their corresponding conditional waiting-time distributions.</p>
<p>We can construct other waiting time CFs from &#x003C8;<sub><italic>T</italic>|<italic>a</italic></sub>(&#x003C4;) and &#x003C8;<sub><italic>T</italic>|<italic>b</italic></sub>(&#x003C4;). For example, let <italic>A</italic> represent the waiting time until a pixel generates an on event. Let <italic>B</italic> represent the number of off events that occur while we wait for the on event. Since each threshold crossing is an i.i.d. trial, the CCF of <italic>A</italic> given <italic>B</italic> is:</p>
<disp-formula id="EQ33"><mml:math id="M36"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>A</mml:mi><mml:mo>|</mml:mo><mml:mi>B</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>B</mml:mi></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>a</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>We can calculate the CF of <italic>A</italic> from this CCF:</p>
<disp-formula id="EQ34"><mml:math id="M37"><mml:mtable columnalign='left'><mml:mtr><mml:mtd><mml:msub><mml:mi>&#x003C8;</mml:mi><mml:mi>A</mml:mi></mml:msub><mml:mo stretchy='false'>(</mml:mo><mml:mi>&#x003C4;</mml:mi><mml:mo stretchy='false'>)</mml:mo><mml:mo>=</mml:mo><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mi>&#x003C4;</mml:mi><mml:mi>A</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msup><mml:mi>e</mml:mi><mml:mrow><mml:mi>&#x003C4;</mml:mi><mml:mi>A</mml:mi></mml:mrow></mml:msup><mml:mo>&#x0007C;</mml:mo><mml:mi>B</mml:mi></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mi>&#x003C8;</mml:mi><mml:mrow><mml:mi>A</mml:mi><mml:mo>&#x0007C;</mml:mo><mml:mi>B</mml:mi></mml:mrow></mml:msub><mml:mo stretchy='false'>(</mml:mo><mml:mi>&#x003C4;</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mo>]</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mtext>&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x000A0;&#x02003;&#x02003;&#x02003;&#x02003;</mml:mtext><mml:mo>=</mml:mo><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mi>&#x003C8;</mml:mi><mml:mrow><mml:mi>T</mml:mi><mml:mo>&#x0007C;</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mo 
stretchy='false'>(</mml:mo><mml:mi>&#x003C4;</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mi>B</mml:mi></mml:msup><mml:msub><mml:mi>&#x003C8;</mml:mi><mml:mrow><mml:mi>T</mml:mi><mml:mo>&#x0007C;</mml:mo><mml:mi>a</mml:mi></mml:mrow></mml:msub><mml:mo stretchy='false'>(</mml:mo><mml:mi>&#x003C4;</mml:mi><mml:mo stretchy='false'>)</mml:mo></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>We evaluate this expectation by noting that <italic>B</italic>&#x0007E;Geom(1&#x02212;&#x003B1;):</p>
<disp-formula id="EQ35"><mml:math id="M38"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mo>&#x1D53C;</mml:mo><mml:mrow><mml:mo>[</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>B</mml:mi></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>a</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mo>]</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mstyle displaystyle="true"><mml:munderover accentunder="false" accent="false"><mml:mrow><mml:mo>&#x02211;</mml:mo></mml:mrow><mml:mrow><mml:mi>B</mml:mi><mml:mo>=</mml:mo><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:mi>&#x0221E;</mml:mi></mml:mrow></mml:munderover></mml:mstyle><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mi>B</mml:mi></mml:mrow></mml:msup><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>a</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:msup><mml:mrow><mml:mi>&#x003B1;</mml:mi></mml:mrow><mml:mrow><mml:mi>B</mml:mi></mml:mrow></mml:msup><mml:mrow><mml:mo 
stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mi>&#x003B1;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>then recognizing the sum as a geometric series:</p>
<disp-formula id="EQ36"><mml:math id="M39"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>A</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mi>&#x003B1;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>a</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:mi>&#x003B1;</mml:mi><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mfrac><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
</sec>
<sec>
<label>3.2</label>
<title>Leaky current-integrating node given SPAD input</title>
<p><xref ref-type="fig" rid="F5">Figure 5</xref> shows another hardware implementation of a sequential analysis problem (<xref ref-type="bibr" rid="B6">Delic and Afshar, 2020</xref>). A single-photon avalanche diode (SPAD) operated in Geiger mode converts every detected photon into a precisely timed electrical impulse. Early SPAD-event processing circuits illustrate this principle (<xref ref-type="bibr" rid="B1">Afshar et al., 2020</xref>). Efficient real-time processing of SPAD array data is the subject of active investigation (<xref ref-type="bibr" rid="B11">Gyongy et al., 2020</xref>; <xref ref-type="bibr" rid="B7">Delic and Afshar, 2024</xref>). The gold trace in the top plot of <xref ref-type="fig" rid="F5">Figure 5</xref> is a hypothetical cumulative count of photons detected by a SPAD (left <italic>y</italic>-axis, top plot). When that SPAD feeds a current-integrating node with a finite leak conductance, the node voltage <italic>V</italic>(<italic>t</italic>) executes a biased random walk. <italic>V</italic>(<italic>t</italic>) jumps up by a fixed amount <italic>u</italic> upon each photon arrival and drifts down with constant slope <italic>m</italic> between arrivals (<xref ref-type="bibr" rid="B7">Delic and Afshar, 2024</xref>; <xref ref-type="bibr" rid="B32">Morrison et al., 2020</xref>). The cyan trace in the top plot is a realization of that random walk, given the observation of photons (right <italic>y</italic>-axis, top plot). When <italic>V</italic>(<italic>t</italic>) crosses an on or off threshold (dashed green and red lines in the top plot), an event is generated (shown in green and red raster plots), and <italic>V</italic>(<italic>t</italic>) is reset between these points. Sequential analysis can thoroughly and tractably analyze this circuit.</p>
<fig position="float" id="F5">
<label>Figure 5</label>
<caption><p>Diagram and operation of a leaky capacitive node that integrates SPAD sensor input. <bold>(Top plot)</bold> A single-photon avalanche diode (SPAD) converts each detected photon (the gold trace is the cumulative photon count, left <italic>y</italic>-axis) into identical current impulses that charge a capacitive node. The voltage across this node <italic>V</italic>(<italic>t</italic>) (cyan trace, right <italic>y</italic>-axis) leaks through a constant conductance and decays at a rate <italic>m</italic> between photon arrivals. Each photon arrival causes an instantaneous voltage jump of <italic>u</italic> in <italic>V</italic>(<italic>t</italic>). Two programmable comparators monitor <italic>V</italic>(<italic>t</italic>): an upper (on) threshold and a lower (off) threshold. When <italic>V</italic>(<italic>t</italic>) crosses the on threshold, an on event is emitted, and vice versa. After each event, <italic>V</italic>(<italic>t</italic>) is reset between the thresholds. Notice that on events are more frequent as the photon count increases, and off events are more frequent when the photon count stagnates. <bold>(Middle plot)</bold> The leak rate <italic>m</italic> is decreased or increased after each on or off event, respectively (blue trace). This adaptive change implements a refractory-like gain control mechanism that balances the event rate between the two thresholds. <bold>(Bottom plot)</bold> Circuit diagram of the SPAD and capacitive node.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-19-1735027-g0005.tif">
<alt-text content-type="machine-generated">Graph showing photon counts over 10,000 clock cycles with a fluctuating cyan line and a solid yellow line. Labels indicate &#x0201C;ON&#x0201D; and &#x0201C;OFF&#x0201D; states. Below, a flowchart illustrates a system with SPAD and decrement counters linked to event detectors, controlled by a digital phase-locked loop.</alt-text>
</graphic>
</fig>
<p>The tractability of sequential analysis is a powerful feature. Absorption probabilities and times are expressed as functions of the problem&#x00027;s input parameters. Thus, if we change some parameter values upon threshold crossing, Wald&#x00027;s methodology remains applicable for analyzing future threshold crossings. For example, say we change the value of the decay rate <italic>m</italic> after every threshold crossing. We could increase or decrease it depending on which threshold is crossed to achieve a habituation-like gain-control mechanism. The blue trace in the middle plot of <xref ref-type="fig" rid="F5">Figure 5</xref> is a realization of <italic>m</italic> when we implement such a rule. Every time an on event is generated, the slope decreases, and vice versa. Thus, every event biases <italic>V</italic>(<italic>t</italic>) to hit the off threshold on the next random walk. In <xref ref-type="fig" rid="F5">Figure 5</xref>, notice that an increasing photon count initially causes the current-integrating node to frequently generate on events. However, as <italic>m</italic> decreases, the on events habituate, and we begin to observe off events. Then, when the photon count stagnates, we observe the reverse effect. We could achieve a similar gain-control effect through other mechanisms. Upon crossing the threshold, we change the values of the thresholds to bias future crossings analogously. The circuit at the bottom of <xref ref-type="fig" rid="F5">Figure 5</xref> is a diagram of how to implement these types of feedback mechanisms in hardware. Whichever mechanism(s) we use, sequential analysis tells us how to compute threshold-crossing probabilities and times.</p>
<p>Define a &#x02018;time step&#x00027; to be the waiting time until a photon arrival, including the arrival itself. We model photon arrival to the SPAD as a Poisson process with rate &#x003BB;. Then the waiting time until photon arrival <italic>E</italic> is exponentially distributed; <italic>E</italic> &#x0007E; Exp(&#x003BB;). While waiting for the photon to arrive at the SPAD, the node voltage decays linearly at a rate <italic>m</italic> &#x0003C; 0. When a photon arrives, its voltage bumps up by a constant amount <italic>u</italic> &#x0003E; 0. Thus, the change in the node&#x00027;s voltage over a time step is as follows:</p>
<disp-formula id="EQ37"><mml:math id="M40"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mi>X</mml:mi><mml:mo>=</mml:mo><mml:mi>m</mml:mi><mml:mi>E</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>u</mml:mi><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>The MGF of <italic>X</italic> is as follows:</p>
<disp-formula id="EQ38"><mml:math id="M41"><mml:mtable columnalign="center"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mi>X</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi><mml:mi>E</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mi>u</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mi>E</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>m</mml:mi><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mi>u</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>u</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msup><mml:mfrac><mml:mrow><mml:mo>&#x003BB;</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x003BB;</mml:mo><mml:mo>-</mml:mo><mml:mi>m</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:mfrac><mml:mtext>&#x02003;</mml:mtext><mml:mtext class="textrm" mathvariant="normal">for&#x000A0;</mml:mtext><mml:mi>m</mml:mi><mml:mi>h</mml:mi><mml:mo>&#x0003C;</mml:mo><mml:mo>&#x003BB;</mml:mo><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Again, we begin by calculating threshold crossing probabilities. It is noteworthy that we cannot find a closed-form expression for the non-trivial root <italic>h</italic><sub>0</sub> of &#x003D5;<sub><italic>X</italic></sub>(<italic>h</italic><sub>0</sub>) &#x0003D; 1. We could numerically evaluate that root, but instead, we will employ an approximation. Assume that &#x1D53C;[<italic>X</italic>]&#x02248;0 so that <italic>h</italic><sub>0</sub>&#x02248;0. Taylor expanding the exponential:</p>
<disp-formula id="EQ39"><mml:math id="M42"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>u</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:msup><mml:mfrac><mml:mrow><mml:mo>&#x003BB;</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x003BB;</mml:mo><mml:mo>-</mml:mo><mml:mi>m</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:mfrac><mml:mo>&#x02248;</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#x0002B;</mml:mo><mml:mi>u</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>&#x0002B;</mml:mo><mml:msup><mml:mrow><mml:mi>u</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:msubsup><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msubsup><mml:mo>/</mml:mo><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mfrac><mml:mrow><mml:mo>&#x003BB;</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x003BB;</mml:mo><mml:mo>-</mml:mo><mml:mi>m</mml:mi><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow></mml:mfrac><mml:mo>=</mml:mo><mml:mn>1</mml:mn><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Rearranging for <italic>h</italic><sub>0</sub>:</p>
<disp-formula id="EQ40"><mml:math id="M43"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>&#x02248;</mml:mo><mml:mfrac><mml:mrow><mml:mo>-</mml:mo><mml:mn>2</mml:mn><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>m</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mo>&#x003BB;</mml:mo><mml:mi>u</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mo>&#x003BB;</mml:mo><mml:msup><mml:mrow><mml:mi>u</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mfrac><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p><xref ref-type="disp-formula" rid="EQ14">Equation 2</xref> is very sensitive to the value of <italic>h</italic><sub>0</sub> used because <italic>h</italic><sub>0</sub> appears in exponentials. Thus, our approximation for <italic>h</italic><sub>0</sub> must be very accurate to yield accurate approximations of threshold crossing probabilities. We can enhance the accuracy of our approximation by adding more terms to the Taylor expansion of the exponential. Or we can revert to a numerical solver to achieve sufficient accuracy for Wald&#x00027;s analysis. Whatever method we use to obtain <italic>h</italic><sub>0</sub>, threshold crossing probabilities are then given by <xref ref-type="disp-formula" rid="EQ14">Equation 2</xref>.</p>
<p>Next, we obtain waiting time CCFs by finding two complex roots <italic>h</italic><sub>1</sub>(&#x003C4;) and <italic>h</italic><sub>2</sub>(&#x003C4;) to the equation <inline-formula><mml:math id="M44"><mml:msub><mml:mrow><mml:mi>&#x003D5;</mml:mi></mml:mrow><mml:mrow><mml:mi>X</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mi>&#x003C4;</mml:mi></mml:mrow></mml:msup></mml:math></inline-formula>:</p>
<disp-formula id="EQ41"><mml:math id="M45"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>&#x0002B;</mml:mo><mml:mi>u</mml:mi><mml:mi>h</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:msup><mml:mrow><mml:mi>u</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mi>h</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>/</mml:mo><mml:mn>2</mml:mn></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mfrac><mml:mrow><mml:mo>&#x003BB;</mml:mo></mml:mrow><mml:mrow><mml:mo>&#x003BB;</mml:mo><mml:mo>-</mml:mo><mml:mi>m</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:mfrac><mml:mo>=</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mi>&#x003C4;</mml:mi></mml:mrow></mml:msup><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Rearranging, we find that <italic>h</italic>(&#x003C4;) is given by the quadratic equation:</p>
<disp-formula id="EQ42"><mml:math id="M46"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mi>h</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mo>&#x000B1;</mml:mo><mml:mfrac><mml:mrow><mml:msqrt><mml:mrow><mml:msup><mml:mrow><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mo>&#x003BB;</mml:mo><mml:mi>u</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mi>m</mml:mi><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mi>&#x003C4;</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>-</mml:mo><mml:mn>2</mml:mn><mml:msup><mml:mrow><mml:mo>&#x003BB;</mml:mo></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:msup><mml:mrow><mml:mi>u</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mi>&#x003C4;</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:msqrt></mml:mrow><mml:mrow><mml:mo>&#x003BB;</mml:mo><mml:msup><mml:mrow><mml:mi>u</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mfrac><mml:mo>-</mml:mo><mml:mfrac><mml:mrow><mml:mi>m</mml:mi><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mo>-</mml:mo><mml:mi>&#x003C4;</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mo>&#x003BB;</mml:mo><mml:msup><mml:mrow><mml:mi>u</mml:mi></mml:mrow><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mfrac><mml:mo>-</mml:mo><mml:mfrac><mml:mrow><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>u</mml:mi></mml:mrow></mml:mfrac><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Again, <italic>h</italic><sub>1</sub>(&#x003C4;) is given by one sign in the &#x000B1; symbol, and <italic>h</italic><sub>2</sub>(&#x003C4;) by the other. Inserting <italic>h</italic><sub>1</sub>(&#x003C4;) and <italic>h</italic><sub>2</sub>(&#x003C4;) into <xref ref-type="disp-formula" rid="EQ21">Equations 3</xref>, we find the waiting time CCFs to threshold crossing.</p>
<p><xref ref-type="fig" rid="F6">Figure 6</xref> applies Wald&#x00027;s analysis to the circuit diagram at the bottom of <xref ref-type="fig" rid="F5">Figure 5</xref>. We achieved gain control for the circuit by adjusting the on and off thresholds, depending on which threshold was crossed. The top row of <xref ref-type="fig" rid="F6">Figure 6</xref> shows one example of how the thresholds evolve over a single trial of 2 &#x000D7; 10<sup>6</sup> seconds (red and blue traces). We initialized <italic>V</italic><sub>0</sub> &#x0003D; 0 and the on and off thresholds at 10 and &#x02212;10, respectively. Both thresholds were increased or decreased by 0.1, depending on which was crossed. Our other parameter values were <italic>u</italic> &#x0003D; 0.1, <italic>m</italic> &#x0003D; &#x02212;0.00999, and &#x003BB; &#x0003D; 0.1 (so &#x1D53C;[<italic>X</italic>]&#x02248;0). Halfway through the simulation, we slightly increased the photon intensity from 0.1 to 0.102 photons per second (yellow shaded background, top plot). The timings of on- and off-threshold crossings are represented as green and red raster plots, respectively. For the first half of the simulation, on and off events are approximately equally frequent, so thresholds do not appreciably change. Then, when &#x003BB; increases, on events become significantly more frequent. Both thresholds increase, then balance each other at new values.</p>
<fig position="float" id="F6">
<label>Figure 6</label>
<caption><p>A SPAD sensor feeding inputs to a thresholded capacitive node is a sequential analysis problem. <bold>(Top row)</bold> Output spikes and thresholds of a capacitive node given SPAD sensor input. The SPAD sensor transduces Poisson-distributed photons into electrical impulses. Those impulses are accumulated by a capacitive node. When an impulse arrives, the node&#x00027;s voltage bumps up by an amount <italic>u</italic> &#x0003D; 0.1. As no impulses arrive, the voltage decays linearly at a rate of <italic>m</italic> &#x0003D; &#x02212;0.00999. The voltage is bounded by two thresholds (red and blue traces), and the node produces on and off events (green and red raster plots) when either threshold is crossed. Both thresholds increase or decrease by 0.1 when the on or off threshold is crossed, respectively. Halfway through the simulation, the photon rate increases from &#x003BB; &#x0003D; 0.1 to &#x003BB; &#x0003D; 0.102 (yellow shaded area). As event frequency increases, off events become rare. The thresholds increase accordingly and stabilize at new, higher values when on and off events occur at similar rates. <bold>(Middle row)</bold> CCFs of the number of photons required to cross the on (left panel) or off (right panel) threshold. CCFs and simulation results are plotted in a manner analogous to those in <xref ref-type="fig" rid="F4">Figure 4</xref>. Each panel plots two CCFs, one with thresholds at &#x02212;5 and 15 (solid traces) and the other with thresholds at &#x02212;15 and 5 (dashed traces). <bold>(Bottom row)</bold> Threshold evolution of the capacitive node from 1,000 independent simulations identical to the top row. Thresholds were initialized at &#x02212;10 and 10 with an initial voltage set to <italic>V</italic><sub>0</sub> &#x0003D; 0 in all simulations. Simulation curves reflect 100,000 independent trials; sampling variance is below line thickness at the plotted scale.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-19-1735027-g0006.tif">
<alt-text content-type="machine-generated">Graphical representation with three panels. The top panel shows two lines with different trajectories, labeled &#x0201C;on&#x0201D; and &#x0201C;off,&#x0201D; over a yellow and white background. The middle panel contains two graphs labeled \(\psi_{p|a}(\tau)\) and \(\psi_{p|b}(\tau)\), each showing bell-shaped curves in black and pink. The bottom panel mirrors the top panel&#x00027;s style with lines marked &#x0201C;on&#x0201D; and &#x0201C;off,&#x0201D; displaying convergence over a similar color gradient.</alt-text>
</graphic>
</fig>
<p>The middle row of <xref ref-type="fig" rid="F6">Figure 6</xref> plots CCFs of the number of photons required to hit either threshold, for particular parameter values. It is directly analogous to the middle row of <xref ref-type="fig" rid="F4">Figure 4</xref>. The only difference between the middle rows of <xref ref-type="fig" rid="F4">Figures 4</xref>, <xref ref-type="fig" rid="F6">6</xref> is that we plot two CCFs in each panel instead of one. Each CCF was plotted for different threshold values. The solid trace CCFs used threshold values of <italic>a</italic> &#x0003D; 15 and <italic>b</italic> &#x0003D; &#x02212;5, and the dashed trace CCFs used <italic>a</italic> &#x0003D; 5 and <italic>b</italic> &#x0003D; &#x02212;15. For all CCFs we used <italic>V</italic><sub>0</sub> &#x0003D; 0, &#x003BB; &#x0003D; 0.1, and <italic>u</italic> &#x0003D; 0.1. We set <italic>m</italic> &#x0003D; &#x02212;&#x003BB;<italic>u</italic>&#x0002B;10<sup>&#x02212;5</sup> to ensure that &#x1D53C;[<italic>X</italic>]&#x02248;0 for all CCFs. The middle row of <xref ref-type="fig" rid="F6">Figure 6</xref> shows that Wald&#x00027;s CCFs are sensitive to threshold values. So every time the capacitive node spikes and we change the threshold values, those CCFs can change appreciably.</p>
<p>The bottom row of <xref ref-type="fig" rid="F6">Figure 6</xref> plots the evolution of the capacitive node&#x00027;s on and off thresholds over 1,000 independent trials. As in the top row, we changed the rate of incoming photons from &#x003BB; &#x0003D; 0.1 to &#x003BB; &#x0003D; 0.102 halfway through each trial (yellow-shaded background). Each time the node spiked, we adjusted both thresholds, as in the top row, depending on which threshold was crossed. Even a small 2% increase in the photon rate biases the node voltage to cross the on threshold with a much higher probability and in a much shorter time. Since both thresholds increase with each event from the node (and vice versa), both thresholds increase soon after the rate of incoming photons increases. Eventually, the thresholds saturate and stabilize around new values.</p>
<p>Recall that our definition of a time step was the waiting time until a photon arrival, including the arrival itself. Therefore, the random variables of our CCFs are the <italic>number of photon arrivals</italic> required to cross one threshold or the other. In <xref ref-type="fig" rid="F6">Figure 6</xref>, notice that the random variable in the CCFs (middle row) and the x-axes of the top and bottom plots are numbers of photons (<italic>P</italic>), and not time <italic>T</italic>. We can easily switch random variables from <italic>P</italic> to <italic>T</italic>. Upon threshold crossing, <italic>P</italic> and <italic>T</italic> are linearly related:</p>
<disp-formula id="EQ43"><mml:math id="M47"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mi>u</mml:mi><mml:mi>P</mml:mi><mml:mo>-</mml:mo><mml:mi>m</mml:mi><mml:mi>T</mml:mi><mml:mo>=</mml:mo><mml:mi>a</mml:mi><mml:mtext class="textrm" mathvariant="normal">&#x000A0;or&#x000A0;</mml:mtext><mml:mi>b</mml:mi><mml:mtext>&#x02003;</mml:mtext><mml:mo>&#x02192;</mml:mo><mml:mtext>&#x02003;</mml:mtext><mml:mi>T</mml:mi><mml:mo>=</mml:mo><mml:mi>u</mml:mi><mml:mi>P</mml:mi><mml:mo>/</mml:mo><mml:mi>m</mml:mi><mml:mo>-</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>a</mml:mi><mml:mtext class="textrm" mathvariant="normal">&#x000A0;or</mml:mtext><mml:mi>b</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>/</mml:mo><mml:mi>m</mml:mi><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>So the CCFs of <italic>T</italic> are:</p>
<disp-formula id="EQ44"><mml:math id="M48"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>a</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>P</mml:mi><mml:mo>|</mml:mo><mml:mi>a</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mi>u</mml:mi><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:mfrac></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>a</mml:mi><mml:mi>&#x003C4;</mml:mi><mml:mo>/</mml:mo><mml:mi>m</mml:mi></mml:mrow></mml:msup><mml:mo>;</mml:mo><mml:mtext>&#x02003;</mml:mtext><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mo>|</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mi>&#x003C8;</mml:mi></mml:mrow><mml:mrow><mml:mi>P</mml:mi><mml:mo>|</mml:mo><mml:mi>b</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo stretchy="true">(</mml:mo><mml:mrow><mml:mfrac><mml:mrow><mml:mi>u</mml:mi><mml:mi>&#x003C4;</mml:mi></mml:mrow><mml:mrow><mml:mi>m</mml:mi></mml:mrow></mml:mfrac></mml:mrow><mml:mo stretchy="true">)</mml:mo></mml:mrow><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>b</mml:mi><mml:mi>&#x003C4;</mml:mi><mml:mo>/</mml:mo><mml:mi>m</mml:mi></mml:mrow></mml:msup><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
</sec>
<sec>
<label>3.3</label>
<title>Hypothesis testing</title>
<p>Sequential analysis is directly applicable to hypothesis testing (<xref ref-type="bibr" rid="B43">Wald and Wolfowitz, 1948</xref>). Suppose we have two competing hypotheses <italic>H</italic><sub>0</sub> (the &#x0201C;null hypothesis&#x0201D;) and <italic>H</italic><sub>1</sub> (the &#x0201C;alternative hypothesis&#x0201D;). We observe data that support one hypothesis or the other until we accumulate sufficient evidence to accept one and reject the other with a specified confidence level. For example, imagine a SPAD sensor receiving photons at a rate &#x003BB; that can only be one of two values &#x003BB;<sub>0</sub> or &#x003BB;<sub>1</sub>. We can set our null hypothesis to be that &#x003BB; &#x0003D; &#x003BB;<sub>0</sub> and our alternative hypothesis to be &#x003BB; &#x0003D; &#x003BB;<sub>1</sub>. We observe the output of the SPAD sensor until we conclude, with a specified confidence level, that the photon rate is one value or the other.</p>
<p>The top plot in <xref ref-type="fig" rid="F7">Figure 7</xref> frames sequential analysis as an online algorithm for hypothesis testing. This algorithm is called the sequential probability ratio test (<xref ref-type="bibr" rid="B43">Wald and Wolfowitz, 1948</xref>), which has been applied across many disciplines (<xref ref-type="bibr" rid="B20">Li and Kulldorff, 2010</xref>; <xref ref-type="bibr" rid="B17">Kulldorff et al., 2011</xref>; <xref ref-type="bibr" rid="B44">Wang and Wan, 2017</xref>; <xref ref-type="bibr" rid="B10">Gold and Shadlen, 2007</xref>). Let the sum <italic>S</italic><sub><italic>t</italic></sub> be the log-likelihood ratio <italic>L</italic><sub><italic>t</italic></sub> of the data, given that either hypothesis is true. For example, we calculate <italic>L</italic><sub><italic>t</italic></sub> for a Poisson rate of incoming photons by taking the log of the ratio of Poisson distributions:</p>
<disp-formula id="EQ45"><mml:math id="M49"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:msub><mml:mrow><mml:mi>L</mml:mi></mml:mrow><mml:mrow><mml:mi>t</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mo class="qopname">log</mml:mo><mml:mfrac><mml:mrow><mml:mo class="qopname">Pr</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>P</mml:mi><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mo>&#x003BB;</mml:mo></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:mo class="qopname">Pr</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mi>P</mml:mi><mml:mo>|</mml:mo><mml:msub><mml:mrow><mml:mo>&#x003BB;</mml:mo></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow></mml:mrow></mml:mfrac><mml:mo>=</mml:mo><mml:mi>P</mml:mi><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mo class="qopname">log</mml:mo><mml:msub><mml:mrow><mml:mo>&#x003BB;</mml:mo></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:mo class="qopname">log</mml:mo><mml:msub><mml:mrow><mml:mo>&#x003BB;</mml:mo></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>-</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:msub><mml:mrow><mml:mo>&#x003BB;</mml:mo></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mo>&#x003BB;</mml:mo></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mi>t</mml:mi><mml:mo>,</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>for <italic>P</italic> photons in time <italic>t</italic>. The inset in the top plot shows how <italic>L</italic><sub><italic>t</italic></sub> increases by a constant amount upon photon arrival and decays linearly between arrivals. While <italic>b</italic>&#x0003C;<italic>L</italic><sub><italic>t</italic></sub>&#x0003C;<italic>a</italic>, we continue observing new data points because we cannot accept either hypothesis with sufficient confidence. Then, when <italic>L</italic><sub><italic>t</italic></sub> crosses either threshold, we accept the corresponding hypothesis. Two example paths of <italic>L</italic><sub><italic>t</italic></sub> are plotted in <xref ref-type="fig" rid="F7">Figure 7</xref>, one crossing the upper threshold (red path) and the other the lower threshold (blue path).</p>
<fig position="float" id="F7">
<label>Figure 7</label>
<caption><p>The sequential probability ratio test is a famous example of a sequential analysis problem. <bold>(Top plot)</bold> The sum <italic>S</italic><sub><italic>t</italic></sub> is the log likelihood <italic>L</italic><sub><italic>t</italic></sub> of data under two competing hypotheses. In this example, the hypotheses are the photon arrival rates &#x003BB;<sub>0</sub> and &#x003BB;<sub>1</sub>. Two example paths of <italic>L</italic><sub><italic>t</italic></sub> are plotted, one crossing threshold <italic>a</italic> (red) and the other <italic>b</italic> (blue). Thresholds map to the probabilities of correct and incorrect hypothesis detection <italic>p</italic><sub><italic>c</italic></sub> and <italic>p</italic><sub><italic>i</italic></sub> (<italic>y</italic>-axis). When <italic>L</italic><sub><italic>t</italic></sub> crosses a threshold, the test accepts the corresponding hypothesis. The inset is a magnification showing the behavior of <italic>L</italic><sub><italic>t</italic></sub> given the first few photons. <bold>(Bottom plots)</bold> CCFs of the number of photons required for threshold crossing, &#x003C8;<sub><italic>P</italic>|<italic>a</italic></sub>(&#x003C4;) (left column) and &#x003C8;<sub><italic>P</italic>|<italic>b</italic></sub>(&#x003C4;) (right column). The top CCFs assume a photon rate of &#x003BB;<sub>0</sub> and the bottom CCFs assume a photon rate of &#x003BB;<sub>1</sub>. Interpretation of these panels is directly analogous to all other CCFs presented in earlier figures. In all plots, &#x003BB;<sub>0</sub> &#x0003D; 0.1, &#x003BB;<sub>1</sub> &#x0003D; 0.102, <italic>p</italic><sub><italic>c</italic></sub> &#x0003D; 0.9, and <italic>p</italic><sub><italic>i</italic></sub> &#x0003D; 0.1. Notice that &#x003B1; &#x02248; <italic>p</italic><sub><italic>c</italic></sub> or <italic>p</italic><sub><italic>i</italic></sub>, depending on which hypothesis was true. 
Simulation curves reflect 100,000 independent trials; sampling variance is below line thickness at the plotted scale.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fnins-19-1735027-g0007.tif">
<alt-text content-type="machine-generated">Graphical representation of a log likelihood ratio, featuring a main plot and three subplots. The main plot shows two lines, pink and blue, representing the likelihood ratio over time, with horizontal dashed reference lines labeled \(L_0 \), \(P_1 = 5982 \), and \(P_2 = 4202 \). The inset graph emphasizes a segment of the pink line. The bottom subplots display curves with parameters for different lambda (\(\lambda\)) values and alpha (\(\alpha\)) levels. The vertical axis ranges from negative to positive values over varying tau (\(\tau\)) ranges.</alt-text>
</graphic>
</fig>
<p>The thresholds <italic>a</italic> and <italic>b</italic> map beautifully to the probabilities of correct and incorrect hypothesis detection <italic>p</italic><sub><italic>c</italic></sub> and <italic>p</italic><sub><italic>i</italic></sub>:</p>
<disp-formula id="EQ46"><mml:math id="M50"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mi>a</mml:mi><mml:mo>&#x02248;</mml:mo><mml:mo class="qopname">log</mml:mo><mml:msub><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:mo>/</mml:mo><mml:msub><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>;</mml:mo><mml:mtext>&#x02003;&#x000A0;</mml:mtext><mml:mi>b</mml:mi><mml:mo>&#x02248;</mml:mo><mml:mo class="qopname">log</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>c</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>/</mml:mo><mml:mrow><mml:mo stretchy="false">(</mml:mo><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mi>p</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo stretchy="false">)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>Thus, before we start the test, we decide on &#x02018;acceptable&#x02019; errors: accepting the hypothesis <italic>H</italic><sub>0</sub> when <italic>H</italic><sub>1</sub> is true, and vice versa. Then we simply calculate the thresholds that we should use for the test to achieve those probabilities. In <xref ref-type="fig" rid="F7">Figure 7</xref>, we set <italic>p</italic><sub><italic>c</italic></sub> &#x0003D; 0.9 and <italic>p</italic><sub><italic>i</italic></sub> &#x0003D; 0.1. One reason that the sequential probability ratio test is a very popular hypothesis testing algorithm is that it enjoys a significant optimality property. No other test can achieve the same or better <italic>p</italic><sub><italic>c</italic></sub> with a lower expected number of samples (<xref ref-type="bibr" rid="B43">Wald and Wolfowitz, 1948</xref>). When samples are expensive, e.g., testing the efficacy of a new drug that is expensive to manufacture, this optimality property is very valuable.</p>
<p>Our sequential probability ratio test on Poisson-distributed photons is identical to our application on the current-integrating node, given SPAD sensor input from the previous subsection. The linear decay between photon arrivals (what we called <italic>m</italic>) is (&#x003BB;<sub>0</sub>&#x02212;&#x003BB;<sub>1</sub>) here. The voltage bumps upon photon arrivals (what we called <italic>u</italic>) are (log&#x003BB;<sub>1</sub>&#x02212;log&#x003BB;<sub>0</sub>) here. Therefore, the capacitive node from the previous subsection <bold>implements a sequential probability ratio test for two particular photon intensities</bold>. The value of the photon intensities in the test is implied by the slope of the decay between photon arrivals and the amount its voltage bumps up upon photon arrivals:</p>
<disp-formula id="EQ47"><mml:math id="M51"><mml:mtable columnalign="left"><mml:mtr><mml:mtd><mml:mtext>&#x02003;&#x02003;&#x02003;</mml:mtext><mml:mi>m</mml:mi><mml:mo>=</mml:mo><mml:msub><mml:mrow><mml:mo>&#x003BB;</mml:mo></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mrow><mml:mo>&#x003BB;</mml:mo></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mtext>&#x02003;</mml:mtext><mml:mi>u</mml:mi><mml:mo>=</mml:mo><mml:mo class="qopname">log</mml:mo><mml:msub><mml:mrow><mml:mo>&#x003BB;</mml:mo></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:mo class="qopname">log</mml:mo><mml:msub><mml:mrow><mml:mo>&#x003BB;</mml:mo></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mo>&#x02192;</mml:mo><mml:mtext>&#x02003;</mml:mtext><mml:msub><mml:mrow><mml:mo>&#x003BB;</mml:mo></mml:mrow><mml:mrow><mml:mn>0</mml:mn></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mi>m</mml:mi><mml:mo>&#x0002B;</mml:mo><mml:mfrac><mml:mrow><mml:mi>m</mml:mi><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>u</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>u</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:mfrac><mml:mo>,</mml:mo><mml:mtext>&#x02003;</mml:mtext><mml:msub><mml:mrow><mml:mo>&#x003BB;</mml:mo></mml:mrow><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>m</mml:mi><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>u</mml:mi></mml:mrow></mml:msup></mml:mrow><mml:mrow><mml:mn>1</mml:mn><mml:mo>-</mml:mo><mml:msup><mml:mrow><mml:mi>e</mml:mi></mml:mrow><mml:mrow><mml:mi>u</mml:mi></mml:mrow></mml:msup></mml:mrow></mml:mfrac><mml:mo>.</mml:mo></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>
<p>The thresholds of the capacitive node <bold>define probabilities of correct and incorrect hypothesis detection of the test</bold>. Thus, when we changed thresholds after each threshold crossing in the previous subsection, we implicitly changed those detection probabilities. The output spikes of the node are its assertions that it accepts one photon rate over another. We can simply reuse the mathematical analysis from the previous subsection for this sequential probability ratio test.</p>
<p>The bottom half of <xref ref-type="fig" rid="F7">Figure 7</xref> plots four waiting time CCFs that we obtained from <xref ref-type="disp-formula" rid="EQ21">Equation 3</xref>. More specifically, they are CCFs for the number of photons <italic>P</italic> required to cross either threshold. Again, real and imaginary parts of the CCFs are given by the pink/red and gray/black traces, respectively. Again, solid traces are theoretical results from <xref ref-type="disp-formula" rid="EQ21">Equation 3</xref> and dashed traces are Fourier transforms of simulated waiting times until threshold crossing. In all plots, we set the photon rates to be &#x003BB;<sub>0</sub> &#x0003D; 0.1 and &#x003BB;<sub>1</sub> &#x0003D; 0.102. The CCFs in the left column are &#x003C8;<sub><italic>P</italic>|<italic>a</italic></sub>(&#x003C4;) and the CCFs in the right column are &#x003C8;<sub><italic>P</italic>|<italic>b</italic></sub>(&#x003C4;). The top CCFs were evaluated under hypothesis <italic>H</italic><sub>0</sub>, i.e., the photon rate was &#x003BB; &#x0003D; &#x003BB;<sub>0</sub>. The bottom CCFs were evaluated under hypothesis <italic>H</italic><sub>1</sub>. We print threshold crossing probabilities in the left panels. Notice that &#x003B1;&#x02248;0.1 and &#x003B1;&#x02248;0.9 are in close agreement with our desired probabilities of incorrect and correct detection <italic>p</italic><sub><italic>i</italic></sub> and <italic>p</italic><sub><italic>c</italic></sub>.</p>
</sec>
</sec>
<sec sec-type="discussion" id="s4">
<label>4</label>
<title>Discussion</title>
<p>Threshold-crossing problems, such as sequential analysis, provide a rigorous framework for evaluating and/or interpreting neuromorphic architectures. We have illustrated how the same mathematical framework can play three different roles, depending on its context. First, sequential analysis can serve as a <bold>benchmark</bold> to compare against hardware. Similar to the Carnot engine in thermodynamics, it does not need to model real devices to be useful. Second, it can serve as a <bold>proxy model</bold> that makes circuit behavior tractable. By abstracting away transistor-level details, sequential analysis offers a tractable model that maps circuit parameters to dynamics. Third, it can serve as a <bold>design tool</bold> that prescribes optimal circuit behavior. Framing circuits as statistical decision-makers turns parameter selection into a principled mapping from problem specification to circuit design. Together, these three roles establish sequential analysis as a benchmark, proxy model, and design tool for neuromorphic hardware within a unified statistical language.</p>
<p>Our first application, noisy event pixels, illustrates the value of sequential analysis as a <bold>benchmark</bold>. Real pixels generate spurious events even under constant illumination (<xref ref-type="bibr" rid="B21">Lichtsteiner et al., 2008</xref>; <xref ref-type="bibr" rid="B15">iniVation, 2020</xref>; <xref ref-type="bibr" rid="B12">Hamilton et al., 2014</xref>). The statistics of those events are difficult to model in detail because of device mismatch and circuit-level complexity. Sequential analysis does not attempt to reproduce those details. Instead, it provides the ideal statistical baseline for fluctuations, defining what noise would look like if the pixel were to follow a perfectly tractable random process. This role is directly analogous to that of the Carnot engine in thermodynamics. No real physical engine achieves it, but it defines the efficiency ceiling and establishes a rigorous language for comparison. In the same way, sequential analysis supplies neuromorphic engineers with an ideal standard against which measured device behavior can be evaluated. Deviations from this benchmark are then informative rather than merely problematic.</p>
<p>Our second application, adaptive dynamics in neuromorphic circuits, illustrates the value of sequential analysis as a <bold>proxy model</bold>. Circuit behavior is often shaped by many interdependent parameters at the transistor level, making tractable analysis impossible. Sequential analysis does not replicate those low-level mechanisms. Instead, it provides a simplified statistical model in which adaptation appears as a shift in decision thresholds or effective time constants. This abstraction connects measurable input&#x02013;output behavior to the circuit&#x00027;s underlying computational role. Sequential analysis provides a tractable stand-in that reveals how circuit parameters drive observable dynamics.</p>
<p>Our third application, decision-making circuits, illustrates the value of sequential analysis as a <bold>design tool</bold> (<xref ref-type="bibr" rid="B4">Burri et al., 2014</xref>; <xref ref-type="bibr" rid="B45">Woods et al., 2019</xref>). When circuits are viewed as statistical decision-makers, problem requirements such as tolerable error rates map directly onto circuit parameters, such as thresholds and decay rates. Sequential analysis provides the formal framework that defines this mapping, making design choices constructive rather than empirical. Under the assumptions of sequential analysis, hardware decision times inherit valuable optimality properties (<xref ref-type="bibr" rid="B43">Wald and Wolfowitz, 1948</xref>). This perspective also makes the computational function of a circuit node transparent. For example, a current-integrating node can be interpreted as implementing an online sequential probability ratio test on SPAD input. Each output event is its probabilistic assertion that the incident photon intensity belongs to one hypothesis rather than another. Threshold values specify the confidence level of those assertions. Sequential analysis, therefore, provides both design prescriptions and rigorous probabilistic interpretations of circuit behavior.</p>
<p>While powerful, sequential analysis rests on restrictive assumptions that limit its direct applicability. Classical formulations require independent and identically distributed observations. In practice, neuromorphic signals rarely meet this criterion. Inputs are often time-varying, reflecting both stimulus dynamics and adaptive circuit responses. They are also correlated across space and time, violating the independence assumptions of Wald&#x00027;s classical approach. Moreover, optimal sequential tests assume that the likelihood ratio between competing hypotheses can be written down explicitly. In practice, this explicit representation is rarely available. The probability distributions of real inputs are often too complex to admit closed-form likelihoods, e.g., due to device mismatch. These challenges do not invalidate sequential analysis, but they highlight the need for extensions that adapt the framework to more realistic conditions.</p>
<p>The assumption of stationarity is incompatible with sensory streams, where input rates fluctuate due to both external stimuli and circuit-level adaptation. Sequential analysis can still be applied by invoking the time-rescaling theorem (<xref ref-type="bibr" rid="B3">Brown et al., 2002</xref>; <xref ref-type="bibr" rid="B13">Haslinger et al., 2010</xref>), which transforms an inhomogeneous Poisson process into a homogeneous one of unit rate. This transformation preserves the statistics of event timing while removing the nonstationarity. So we can apply classical sequential methods in the rescaled time domain. If we also had an estimate of the inhomogeneous intensity (<xref ref-type="bibr" rid="B33">Ng and Murphy, 2019</xref>), we could invert our results to return to the original time domain. Circuits that integrate changing input rates can still be interpreted within a sequential framework, but with temporal variability absorbed into the rescaling.</p>
<p>The assumption of independence is also violated in practice, since neuromorphic signals are often correlated across space and time. These dependencies break the classical proofs of optimality that rely on the i.i.d. structure of Wald&#x00027;s original framework. However, the sequential paradigm itself does not require independence. It only requires that we can quantify how new evidence modifies the decision statistic. Correlations can be incorporated directly into the likelihood function when the joint distribution is known or approximated. Even when exact models are unavailable, structured approximations can preserve the essential behavior of the test statistic. Correlation complicates the math, but the core idea of accumulating evidence toward a threshold remains valid.</p>
<p>A further assumption is the absence of threshold overshoot: increments are implicitly taken to be small enough that the process crosses the boundary exactly rather than leaping past it. Real neuromorphic circuits do not obey this constraint. In practice, overshoot does not invalidate the sequential framework; it alters only the mapping between the physical voltage trajectory and the effective statistical test. Standard corrections are available: boundary-adjustment factors derived from renewal theory, first-passage approximations for jump processes, or continuous-time formulations in which the hazard rate characterizes crossings without requiring perfect boundary contact. These provide engineering countermeasures that preserve the utility of sequential analysis even when threshold crossings occur with finite overshoot.</p>
<p>Although our derivations rely on i.i.d. and stationary increments, their role in the three case studies is as a principled reference rather than a literal description of hardware or biology. For the idealized event-pixel example, the assumption isolates the fundamental stochastic mechanism underlying threshold crossings; real pixels introduce temporal correlations, drift, and device-level variability, but these effects serve as structured deviations from the baseline predicted by sequential analysis rather than contradictions. For the LIF node driven by SPAD-like input, the model already includes leaky dynamics and thus diverges from strict i.i.d. behavior; here, sequential analysis provides an analytically tractable approximation that captures the correct scaling of crossing statistics even when the microscopic increments are not perfectly independent. In the SPRT case study, non-i.i.d. evidence streams simply alter the effective log-likelihood update without compromising the decision-theoretic framework. Across all three examples, the idealized assumptions provide a clean baseline that clarifies how each system behaves when higher-order correlations, drift, or non-stationarities are added. This makes deviations interpretable and allows the sequential analysis predictions to function as a benchmark rather than a surrogate for full hardware realism.</p>
<p>Our contribution is conceptual rather than device-specific. We establish how classical sequential analysis provides analytically tractable predictions for threshold-crossing behavior and decision dynamics, and we demonstrate this across three distinct neuromorphic contexts. The case studies serve as proof of principle, showing how the framework can be integrated into existing modeling and design workflows. The simulations validate the theory under controlled conditions. Measurements of event-sensor noise often show deviations from the idealized model used in our first case study, including illumination-dependent drift and heavier-tailed inter-event statistics. These discrepancies are expected: sequential analysis defines the baseline fluctuations in the absence of such circuitry-specific effects, and real data typically sit above this baseline in predictable ways. Hardware evaluation is, therefore, a natural next step for future work.</p>
<p>Although our case studies focus on single nodes, the same framework extends naturally to larger SNNs. Threshold tuning in multi-layer networks is often heuristic, whereas sequential analysis provides explicit predictions for firing rates, false-alarm probabilities, and detection delays. These quantities can be propagated through a network because each layer filters and transforms the distribution observed by the previous one. This makes sequential analysis a principled complement to existing tuning methods: it provides analytical targets for threshold setting and clarifies how architectural changes affect the statistics of threshold events.</p>
<p>Sequential analysis occupies a distinct position relative to existing neuromorphic modeling approaches. Circuit-level models, such as mixed-signal simulations, differential-equation descriptions of neurons, or large-scale numerical SNN frameworks, emphasize numerical fidelity but rarely yield closed-form predictions for error rates, latency distributions, or threshold-crossing probabilities. In contrast, sequential analysis trades low-level detail for analytical transparency; it provides exact or asymptotic expressions for these quantities with far lower computational cost. Its value is therefore complementary rather than competitive: classical models capture device realism, while sequential analysis supplies principled performance bounds and interpretable operating points that would otherwise require extensive simulation to estimate.</p>
<p>The same threshold-centric view also connects to contemporary SNN architectures. In models such as Spiking Transformers (<xref ref-type="bibr" rid="B47">Zhao et al., 2025</xref>), multi-modal SNNs with temporal attention (<xref ref-type="bibr" rid="B36">Shen et al., 2025</xref>), or recurrent SNNs employing adaptive history mechanisms (<xref ref-type="bibr" rid="B46">Xu et al., 2023</xref>), performance ultimately depends on how local membrane-state variables cross internal thresholds to emit spikes. Sequential analysis provides closed-form links between increment statistics, firing probabilities, and expected latencies. These metrics can inform the co-design of thresholds or attention-gating rules in these architectures. More biologically grounded models with adaptive dendritic processes (<xref ref-type="bibr" rid="B24">Mao et al., 2025</xref>) can also be framed in this way: the dendritic dynamics shape the effective distribution seen at the soma (or the relevant spike initiation zone), and the same machinery characterizes the resulting crossing statistics. Thus, while specific implementation details differ across architectures, the underlying principles&#x02013;increment distributions, drift, variance, and threshold geometry&#x02013;admit the same analytical treatment and offer a complementary tool for understanding and tuning complex SNNs.</p>
<p>Classical sequential tests require exact likelihood functions to define optimal decision rules (<xref ref-type="bibr" rid="B37">Singh and Zhurbenko, 1975</xref>; <xref ref-type="bibr" rid="B2">Bartoo and Puri, 1967</xref>). The assumption of known likelihood ratios is fragile because its underlying distributions may be analytically intractable. When these functions are unknown, one approach is to exploit the Karlin&#x02013;Rubin theorem. We can still identify uniformly most powerful tests within exponential families, even without explicit forms of the likelihood ratio. More generally, sequential analysis can be extended to composite hypothesis testing, where decision rules are constructed across a family of possible distributions rather than a single known model (<xref ref-type="bibr" rid="B38">Tartakovsky et al., 2014</xref>). We can often relax the requirement of closed-form likelihoods while retaining the sequential structure of accumulating evidence toward a threshold.</p>
<p>Sequential analysis provides more than a narrow mathematical idealization. We consider it a practical language for thinking about neuromorphic circuits. As a <bold>benchmark</bold>, it defines rigorous statistical baselines that reveal when hardware deviates from expectation. As a <bold>proxy model</bold>, it makes intractable circuit dynamics interpretable by recasting them in a simplified but faithful statistical form. As a <bold>design tool</bold>, it translates performance goals into circuit parameters, providing constructive prescriptions for circuit design rather than empirical tuning. While its classical formulation relies on restrictive assumptions of stationarity, independence, and closed-form likelihoods, these are not fatal obstacles. Extensions such as time-rescaling, correlated likelihoods, and composite hypothesis testing preserve the sequential paradigm while broadening its reach to realistic neuromorphic signals and circuits.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s5">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="supplementary-material" rid="SM1">Supplementary material</xref>, further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="author-contributions" id="s6">
<title>Author contributions</title>
<p>SM: Investigation, Writing &#x02013; review &#x00026; editing, Writing &#x02013; original draft, Formal analysis, Validation, Methodology, Conceptualization. SA: Visualization, Writing &#x02013; original draft, Supervision, Validation, Methodology, Writing &#x02013; review &#x00026; editing. TM: Visualization, Project administration, Formal analysis, Writing &#x02013; review &#x00026; editing, Conceptualization, Supervision, Methodology, Writing &#x02013; original draft, Investigation.</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="s8">
<title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="s9">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="s10">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fnins.2025.1735027/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fnins.2025.1735027/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/></sec>
<ref-list>
<title>References</title>
<ref id="B1">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Afshar</surname> <given-names>S.</given-names></name> <name><surname>Hamilton</surname> <given-names>T. J.</given-names></name> <name><surname>Davis</surname> <given-names>L.</given-names></name> <name><surname>van Schaik</surname> <given-names>A.</given-names></name> <name><surname>Delic</surname> <given-names>D. V.</given-names></name></person-group> (<year>2020</year>). <article-title>Event based processing of single photon avalanche diode sensors</article-title>. <source>IEEE Sens. J</source>. <volume>20</volume>, <fpage>7677</fpage>&#x02013;<lpage>7691</lpage>. doi: <pub-id pub-id-type="doi">10.1109/JSEN.2020.2979761</pub-id></mixed-citation>
</ref>
<ref id="B2">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bartoo</surname> <given-names>J. B.</given-names></name> <name><surname>Puri</surname> <given-names>P. S.</given-names></name></person-group> (<year>1967</year>). <article-title>On optimal asymptotic tests of composite statistical hypotheses</article-title>. <source>Ann. Math. Stat</source>. <volume>38</volume>, <fpage>1845</fpage>&#x02013;<lpage>1852</lpage>. doi: <pub-id pub-id-type="doi">10.1214/aoms/1177698617</pub-id></mixed-citation>
</ref>
<ref id="B3">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Brown</surname> <given-names>E. N.</given-names></name> <name><surname>Barbieri</surname> <given-names>R.</given-names></name> <name><surname>Ventura</surname> <given-names>V.</given-names></name> <name><surname>Kass</surname> <given-names>R. E.</given-names></name> <name><surname>Frank</surname> <given-names>L. M.</given-names></name></person-group> (<year>2002</year>). <article-title>The time-rescaling theorem and its application to neural spike train data analysis</article-title>. <source>Neural Comput</source>. <volume>14</volume>, <fpage>325</fpage>&#x02013;<lpage>346</lpage>. doi: <pub-id pub-id-type="doi">10.1162/08997660252741149</pub-id><pub-id pub-id-type="pmid">11802915</pub-id></mixed-citation>
</ref>
<ref id="B4">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Burri</surname> <given-names>S.</given-names></name> <name><surname>Charbon</surname> <given-names>E.</given-names></name> <name><surname>Bruschini</surname> <given-names>C.</given-names></name></person-group> (<year>2014</year>). <article-title>Architecture and applications of a high resolution gated SPAD image sensor</article-title>. <source>Optics Express</source> <volume>22</volume>, <fpage>17573</fpage>&#x02013;<lpage>17589</lpage>. doi: <pub-id pub-id-type="doi">10.1364/OE.22.017573</pub-id><pub-id pub-id-type="pmid">25090572</pub-id></mixed-citation>
</ref>
<ref id="B5">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Christensen</surname> <given-names>D. V.</given-names></name> <name><surname>Dittmann</surname> <given-names>R.</given-names></name> <name><surname>Linares-Barranco</surname> <given-names>B.</given-names></name> <name><surname>Sebastian</surname> <given-names>A.</given-names></name> <name><surname>Le Gallo</surname> <given-names>M.</given-names></name> <name><surname>Redaelli</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>2022 roadmap on neuromorphic computing and engineering</article-title>. <source>Neuromorph. Comput. Eng</source>. <volume>2</volume>:<fpage>022501</fpage>. doi: <pub-id pub-id-type="doi">10.1088/2634-4386/ac4a83</pub-id></mixed-citation>
</ref>
<ref id="B6">
<mixed-citation publication-type="web"><person-group person-group-type="author"><name><surname>Delic</surname> <given-names>D. V.</given-names></name> <name><surname>Afshar</surname> <given-names>S.</given-names></name></person-group> (<year>2020</year>). <article-title>Neuromorphic single photon avalanche detector (SPAD) array microchip</article-title>. <source>U.S. Patent US11474215B2</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://patents.google.com/patent/US11474215B2/en">https://patents.google.com/patent/US11474215B2/en</ext-link></mixed-citation>
</ref>
<ref id="B7">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Delic</surname> <given-names>D. V.</given-names></name> <name><surname>Afshar</surname> <given-names>S.</given-names></name></person-group> (<year>2024</year>). <article-title>&#x0201C;Neuromorphic computing for compact LiDAR systems,&#x0201D;</article-title> in <source>More than Moore Devices and Integration for Semiconductors</source>, chap. 9, eds F. Iacopi and F. Balestra (Cham: Springer Nature), <fpage>191</fpage>&#x02013;<lpage>240</lpage>. doi: <pub-id pub-id-type="doi">10.1007/978-3-031-21610-7_6</pub-id></mixed-citation>
</ref>
<ref id="B8">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Doob</surname> <given-names>J. L.</given-names></name></person-group> (<year>1953</year>). <source>Stochastic Processes</source>. <publisher-loc>New York, NY</publisher-loc>: <publisher-name>Wiley</publisher-name>.</mixed-citation>
</ref>
<ref id="B9">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gallego</surname> <given-names>G.</given-names></name> <name><surname>Delbruck</surname> <given-names>T.</given-names></name> <name><surname>Orchard</surname> <given-names>G.</given-names></name> <name><surname>Bartolozzi</surname> <given-names>C.</given-names></name> <name><surname>Taba</surname> <given-names>B.</given-names></name> <name><surname>Censi</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Event-based vision: a survey</article-title>. <source>IEEE Trans. Pattern Anal. Mach. Intell</source>. <volume>44</volume>, <fpage>154</fpage>&#x02013;<lpage>180</lpage>. doi: <pub-id pub-id-type="doi">10.1109/TPAMI.2020.3008413</pub-id><pub-id pub-id-type="pmid">32750812</pub-id></mixed-citation>
</ref>
<ref id="B10">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gold</surname> <given-names>J. I.</given-names></name> <name><surname>Shadlen</surname> <given-names>M. N.</given-names></name></person-group> (<year>2007</year>). <article-title>The neural basis of decision making</article-title>. <source>Annu. Rev. Neurosci</source>. <volume>30</volume>, <fpage>535</fpage>&#x02013;<lpage>574</lpage>. doi: <pub-id pub-id-type="doi">10.1146/annurev.neuro.29.051605.113038</pub-id><pub-id pub-id-type="pmid">17600525</pub-id></mixed-citation>
</ref>
<ref id="B11">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gyongy</surname> <given-names>I.</given-names></name> <name><surname>Halimi</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>A 128 &#x000D7; 128 SPAD motion triggered time of flight image sensor with in pixel histogram and column parallel vision processor</article-title>. <source>IEEE J. Solid State Circuits</source> <volume>55</volume>, <fpage>1762</fpage>&#x02013;<lpage>1775</lpage>. doi: <pub-id pub-id-type="doi">10.1109/JSSC.2020.2993722</pub-id></mixed-citation>
</ref>
<ref id="B12">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hamilton</surname> <given-names>T. J.</given-names></name> <name><surname>Afshar</surname> <given-names>S.</given-names></name> <name><surname>van Schaik</surname> <given-names>A.</given-names></name> <name><surname>Tapson</surname> <given-names>J.</given-names></name></person-group> (<year>2014</year>). <article-title>Stochastic electronics: a neuro-inspired design paradigm for integrated circuits</article-title>. <source>Proc. IEEE</source> <volume>102</volume>, <fpage>843</fpage>&#x02013;<lpage>859</lpage>. doi: <pub-id pub-id-type="doi">10.1109/JPROC.2014.2310713</pub-id></mixed-citation>
</ref>
<ref id="B13">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Haslinger</surname> <given-names>R.</given-names></name> <name><surname>Pipa</surname> <given-names>G.</given-names></name> <name><surname>Brown</surname> <given-names>E.</given-names></name></person-group> (<year>2010</year>). <article-title>Discrete time rescaling theorem: determining goodness of fit for discrete time statistical models of neural spiking</article-title>. <source>Neural Comput</source>. <volume>22</volume>, <fpage>2477</fpage>&#x02013;<lpage>2506</lpage>. doi: <pub-id pub-id-type="doi">10.1162/NECO_a_00015</pub-id><pub-id pub-id-type="pmid">20608868</pub-id></mixed-citation>
</ref>
<ref id="B14">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Indiveri</surname> <given-names>G.</given-names></name> <name><surname>Linares-Barranco</surname> <given-names>B.</given-names></name> <name><surname>Hamilton</surname> <given-names>T. J.</given-names></name> <name><surname>van Schaik</surname> <given-names>A.</given-names></name> <name><surname>Etienne-Cummings</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2011</year>). <article-title>Neuromorphic silicon neuron circuits</article-title>. <source>Front. Neurosci</source>. <volume>5</volume>:<fpage>73</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2011.00073</pub-id><pub-id pub-id-type="pmid">21747754</pub-id></mixed-citation>
</ref>
<ref id="B15">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>iniVation</surname> <given-names>A.</given-names></name></person-group> (<year>2020</year>). <source>Understanding the Performance of Neuromorphic Event-Based Vision Sensors</source>. Tech. Rep, Zurich, Switzerland.</mixed-citation>
</ref>
<ref id="B16">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kira</surname> <given-names>S.</given-names></name> <name><surname>Yang</surname> <given-names>T.</given-names></name> <name><surname>Shadlen</surname> <given-names>M. N.</given-names></name></person-group> (<year>2015</year>). <article-title>A neural implementation of Wald&#x00027;s sequential probability ratio test</article-title>. <source>Neuron</source> <volume>85</volume>, <fpage>861</fpage>&#x02013;<lpage>873</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuron.2015.01.007</pub-id><pub-id pub-id-type="pmid">25661183</pub-id></mixed-citation>
</ref>
<ref id="B17">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kulldorff</surname> <given-names>M.</given-names></name> <name><surname>Davis</surname> <given-names>R. L.</given-names></name> <name><surname>Kolczak</surname> <given-names>M.</given-names></name> <name><surname>Lewis</surname> <given-names>E.</given-names></name> <name><surname>Lieu</surname> <given-names>T.</given-names></name> <name><surname>Platt</surname> <given-names>R.</given-names></name></person-group> (<year>2011</year>). <article-title>A maximized sequential probability ratio test for drug and vaccine safety surveillance</article-title>. <source>Sequential Anal</source>. <volume>30</volume>, <fpage>58</fpage>&#x02013;<lpage>78</lpage>. doi: <pub-id pub-id-type="doi">10.1080/07474946.2011.539924</pub-id></mixed-citation>
</ref>
<ref id="B18">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Lai</surname> <given-names>T. L.</given-names></name></person-group> (<year>2009a</year>). <source>Martingales in Sequential Analysis and Time Series, 1945-1985, Volume 5. S&#x000E9;minaire d&#x00027;Histoire du Calcul des Probabilit&#x000E9;s et de la Statistique, EHESS, Paris; Laboratoire de Probabilit&#x000E9;s et Mod&#x000E8;les Al&#x000E9;atoires</source>. <publisher-loc>Paris</publisher-loc>: <publisher-name>Universit&#x000E9; Paris VI et VII</publisher-name>.</mixed-citation>
</ref>
<ref id="B19">
<mixed-citation publication-type="web"><person-group person-group-type="author"><name><surname>Lai</surname> <given-names>T. L.</given-names></name></person-group> (<year>2009b</year>). <article-title>Sequential analysis: some classical problems and new challenges</article-title>. <source>Stat. Sin</source>. <volume>19</volume>, <fpage>303</fpage>&#x02013;<lpage>351</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://www.jstor.org/stable/24306854">https://www.jstor.org/stable/24306854</ext-link></mixed-citation>
</ref>
<ref id="B20">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>L.</given-names></name> <name><surname>Kulldorff</surname> <given-names>M.</given-names></name></person-group> (<year>2010</year>). <article-title>A conditional maximized sequential probability ratio test for pharmacovigilance</article-title>. <source>Stat. Med</source>. <volume>29</volume>, <fpage>284</fpage>&#x02013;<lpage>295</lpage>. doi: <pub-id pub-id-type="doi">10.1002/sim.3780</pub-id><pub-id pub-id-type="pmid">19941282</pub-id></mixed-citation>
</ref>
<ref id="B21">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lichtsteiner</surname> <given-names>P.</given-names></name> <name><surname>Posch</surname> <given-names>C.</given-names></name> <name><surname>Delbruck</surname> <given-names>T.</given-names></name></person-group> (<year>2008</year>). <article-title>A 128 &#x000D7; 128 120 dB 15 &#x003BC;s latency asynchronous temporal contrast vision sensor</article-title>. <source>IEEE J. Solid-State Circuits</source> <volume>43</volume>, <fpage>566</fpage>&#x02013;<lpage>576</lpage>. doi: <pub-id pub-id-type="doi">10.1109/JSSC.2007.914337</pub-id></mixed-citation>
</ref>
<ref id="B22">
<mixed-citation publication-type="thesis"><person-group person-group-type="author"><name><surname>Mahowald</surname> <given-names>M.</given-names></name></person-group> (<year>1992</year>). <source>VLSI Analogs of Neuronal Visual Processing: A Synthesis of Form and Function</source> (Ph.D. thesis). <publisher-loc>Pasadena, CA</publisher-loc>: <publisher-name>California Institute of Technology</publisher-name>.</mixed-citation>
</ref>
<ref id="B23">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mani</surname> <given-names>S.</given-names></name> <name><surname>Hurley</surname> <given-names>P.</given-names></name> <name><surname>van Schaik</surname> <given-names>A.</given-names></name> <name><surname>Monk</surname> <given-names>T.</given-names></name></person-group> (<year>2025</year>). <article-title>The leaky integrate-and-fire neuron is a change-point detector for compound poisson processes</article-title>. <source>Neural Comput</source>. <volume>37</volume>, <fpage>926</fpage>&#x02013;<lpage>956</lpage>. doi: <pub-id pub-id-type="doi">10.1162/neco_a_01750</pub-id><pub-id pub-id-type="pmid">40112139</pub-id></mixed-citation>
</ref>
<ref id="B24">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mao</surname> <given-names>J.</given-names></name> <name><surname>Zheng</surname> <given-names>H.</given-names></name> <name><surname>Yin</surname> <given-names>H.</given-names></name> <name><surname>Fan</surname> <given-names>H.</given-names></name> <name><surname>Mei</surname> <given-names>L.</given-names></name> <name><surname>Guo</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2025</year>). <article-title>Adaptive dendritic plasticity in brain-inspired dynamic neural networks for enhanced multi-timescale feature extraction</article-title>. <source>Neural Netw</source>. <volume>194</volume>:<fpage>108191</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neunet.2025.108191</pub-id><pub-id pub-id-type="pmid">41082828</pub-id></mixed-citation>
</ref>
<ref id="B25">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mead</surname> <given-names>C.</given-names></name></person-group> (<year>1989</year>). <article-title>Analog VLSI and neural systems</article-title>. <source>NASA STI/recon Techn. Rep. A</source> <volume>90</volume>:<fpage>16574</fpage>.</mixed-citation>
</ref>
<ref id="B26">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Monk</surname> <given-names>T.</given-names></name> <name><surname>Dennler</surname> <given-names>N.</given-names></name> <name><surname>Ralph</surname> <given-names>N.</given-names></name> <name><surname>Rastogi</surname> <given-names>S.</given-names></name> <name><surname>Afshar</surname> <given-names>S.</given-names></name> <name><surname>Urbizagastegui</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2024</year>). <article-title>Electrical signaling beyond neurons</article-title>. <source>Neural Comput</source>. <volume>36</volume>, <fpage>1939</fpage>&#x02013;<lpage>2029</lpage>. doi: <pub-id pub-id-type="doi">10.1162/neco_a_01696</pub-id><pub-id pub-id-type="pmid">39141803</pub-id></mixed-citation>
</ref>
<ref id="B27">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Monk</surname> <given-names>T.</given-names></name> <name><surname>Green</surname> <given-names>P.</given-names></name> <name><surname>Paulin</surname> <given-names>M.</given-names></name></person-group> (<year>2014</year>). <article-title>Martingales and fixation probabilities of evolutionary graphs</article-title>. <source>Proc. R. Soc. A: Math. Phys. Eng. Sci</source>. <volume>470</volume>:<fpage>20130730</fpage>. doi: <pub-id pub-id-type="doi">10.1098/rspa.2013.0730</pub-id></mixed-citation>
</ref>
<ref id="B28">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Monk</surname> <given-names>T.</given-names></name> <name><surname>Paulin</surname> <given-names>M. G.</given-names></name> <name><surname>Green</surname> <given-names>P.</given-names></name></person-group> (<year>2015</year>). <article-title>Ecological constraints on the origin of neurones</article-title>. <source>J. Math. Biol</source>. <volume>71</volume>, <fpage>1299</fpage>&#x02013;<lpage>1324</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00285-015-0862-7</pub-id><pub-id pub-id-type="pmid">25697835</pub-id></mixed-citation>
</ref>
<ref id="B29">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Monk</surname> <given-names>T.</given-names></name> <name><surname>van Schaik</surname> <given-names>A.</given-names></name></person-group> (<year>2020</year>). <article-title>Wald&#x00027;s martingale and the conditional distributions of absorption time in the Moran process</article-title>. <source>Proc. R. Soc. A: Math. Phys. Eng. Sci</source>. <volume>476</volume>:<fpage>20200135</fpage>. doi: <pub-id pub-id-type="doi">10.1098/rspa.2020.0135</pub-id></mixed-citation>
</ref>
<ref id="B30">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Monk</surname> <given-names>T.</given-names></name> <name><surname>van Schaik</surname> <given-names>A.</given-names></name></person-group> (<year>2021</year>). <article-title>Martingales and the characteristic functions of absorption time on bipartite graphs</article-title>. <source>R. Soc. Open Sci</source>. <volume>8</volume>:<fpage>210657</fpage>. doi: <pub-id pub-id-type="doi">10.1098/rsos.210657</pub-id><pub-id pub-id-type="pmid">34703620</pub-id></mixed-citation>
</ref>
<ref id="B31">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Monk</surname> <given-names>T.</given-names></name> <name><surname>van Schaik</surname> <given-names>A.</given-names></name></person-group> (<year>2022</year>). <article-title>Martingales and the fixation time of evolutionary graphs with arbitrary dimensionality</article-title>. <source>R. Soc. Open Sci</source>. <volume>9</volume>:<fpage>220011</fpage>. doi: <pub-id pub-id-type="doi">10.1098/rsos.220011</pub-id><pub-id pub-id-type="pmid">35573040</pub-id></mixed-citation>
</ref>
<ref id="B32">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Morrison</surname> <given-names>D.</given-names></name> <name><surname>Kennedy</surname> <given-names>S.</given-names></name> <name><surname>Delic</surname> <given-names>D. V.</given-names></name> <name><surname>Yuce</surname> <given-names>M. R.</given-names></name> <name><surname>Redoute</surname> <given-names>J.</given-names></name></person-group> (<year>2020</year>). <article-title>A 64 &#x000D7; 64 SPAD flash LiDAR sensor using a triple integration timing technique with 1.95 mm depth resolution</article-title>. <source>IEEE Sens. J</source>. <volume>20</volume>, <fpage>14072</fpage>&#x02013;<lpage>14082</lpage>. doi: <pub-id pub-id-type="doi">10.1109/JSEN.2020.3030788</pub-id></mixed-citation>
</ref>
<ref id="B33">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ng</surname> <given-names>T. L. J.</given-names></name> <name><surname>Murphy</surname> <given-names>T. B.</given-names></name></person-group> (<year>2019</year>). <article-title>Estimation of the intensity function of an inhomogeneous poisson process with a change-point</article-title>. <source>Can. J. Stat</source>. <volume>47</volume>, <fpage>604</fpage>&#x02013;<lpage>618</lpage>. doi: <pub-id pub-id-type="doi">10.1002/cjs.11514</pub-id></mixed-citation>
</ref>
<ref id="B34">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Padala</surname> <given-names>V.</given-names></name> <name><surname>Basu</surname> <given-names>A.</given-names></name> <name><surname>Orchard</surname> <given-names>G.</given-names></name></person-group> (<year>2018</year>). <article-title>A noise filtering algorithm for event-based asynchronous change detection image sensors on truenorth and its implementation on truenorth</article-title>. <source>Front. Neurosci</source>. <volume>12</volume>:<fpage>118</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2018.00118</pub-id><pub-id pub-id-type="pmid">29556172</pub-id></mixed-citation>
</ref>
<ref id="B35">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shadlen</surname> <given-names>M. N.</given-names></name> <name><surname>Shohamy</surname> <given-names>D.</given-names></name></person-group> (<year>2016</year>). <article-title>Decision making and sequential sampling from memory</article-title>. <source>Neuron</source> <volume>90</volume>, <fpage>927</fpage>&#x02013;<lpage>939</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.neuron.2016.04.036</pub-id><pub-id pub-id-type="pmid">27253447</pub-id></mixed-citation>
</ref>
<ref id="B36">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Shen</surname> <given-names>J.</given-names></name> <name><surname>Xie</surname> <given-names>Y.</given-names></name> <name><surname>Xu</surname> <given-names>Q.</given-names></name> <name><surname>Pan</surname> <given-names>G.</given-names></name> <name><surname>Tang</surname> <given-names>H.</given-names></name> <name><surname>Chen</surname> <given-names>B.</given-names></name></person-group> (<year>2025</year>). <article-title>&#x0201C;Spiking neural networks with temporal attention-guided adaptive fusion for imbalanced multi-modal learning,&#x0201D;</article-title> in <source>Proceedings of the 33rd ACM International Conference on Multimedia</source> (<publisher-loc>Dublin</publisher-loc>), <fpage>11042</fpage>&#x02013;<lpage>11051</lpage>. doi: <pub-id pub-id-type="doi">10.1145/3746027.3755622</pub-id></mixed-citation>
</ref>
<ref id="B37">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>A. C.</given-names></name> <name><surname>Zhurbenko</surname> <given-names>I. G.</given-names></name></person-group> (<year>1975</year>). <article-title>The power of the optimal asymptotic tests of composite statistical hypotheses</article-title>. <source>Proc. Natl. Acad. Sci. U. S. A</source>. <volume>72</volume>, <fpage>577</fpage>&#x02013;<lpage>580</lpage>. doi: <pub-id pub-id-type="doi">10.1073/pnas.72.2.577</pub-id><pub-id pub-id-type="pmid">16592222</pub-id></mixed-citation>
</ref>
<ref id="B38">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Tartakovsky</surname> <given-names>A.</given-names></name> <name><surname>Nikiforov</surname> <given-names>I.</given-names></name> <name><surname>Basseville</surname> <given-names>M.</given-names></name></person-group> (<year>2014</year>). <source>Sequential Analysis: Hypothesis Testing and Changepoint Detection, 1st Edn</source>. <publisher-loc>Boca Raton, FL</publisher-loc>: <publisher-name>Chapman and Hall/CRC</publisher-name>. doi: <pub-id pub-id-type="doi">10.1201/b17279</pub-id></mixed-citation>
</ref>
<ref id="B39">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Taylor</surname> <given-names>H. M.</given-names></name> <name><surname>Karlin</surname> <given-names>S.</given-names></name></person-group> (<year>1984</year>). <source>An Introduction to Stochastic Modeling</source>. <publisher-loc>Cambridge, MA</publisher-loc>: <publisher-name>Academic Press</publisher-name>. doi: <pub-id pub-id-type="doi">10.1016/B978-0-12-684880-9.50004-6</pub-id></mixed-citation>
</ref>
<ref id="B40">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Urzay</surname> <given-names>C.</given-names></name> <name><surname>Ahad</surname> <given-names>N.</given-names></name> <name><surname>Azabou</surname> <given-names>M.</given-names></name> <name><surname>Schneider</surname> <given-names>A.</given-names></name> <name><surname>Atamkuri</surname> <given-names>G.</given-names></name> <name><surname>Hengen</surname> <given-names>K. B.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Detecting change points in neural population activity with contrastive metric learning</article-title>. <source>Int. IEEE EMBS Conf. Neural Eng</source>. <volume>2023</volume>, <fpage>1</fpage>&#x02013;<lpage>4</lpage>. doi: <pub-id pub-id-type="doi">10.1109/NER52421.2023.10123821</pub-id><pub-id pub-id-type="pmid">37808227</pub-id></mixed-citation>
</ref>
<ref id="B41">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wald</surname> <given-names>A.</given-names></name></person-group> (<year>1944</year>). <article-title>On cumulative sums of random variables</article-title>. <source>Ann. Math. Stat</source>. <volume>15</volume>, <fpage>283</fpage>&#x02013;<lpage>296</lpage>. doi: <pub-id pub-id-type="doi">10.1214/aoms/1177731235</pub-id></mixed-citation>
</ref>
<ref id="B42">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Wald</surname> <given-names>A.</given-names></name></person-group> (<year>1947</year>). <source>Sequential Analysis</source>. <publisher-loc>New York, NY</publisher-loc>: <publisher-name>John Wiley and Sons</publisher-name>.</mixed-citation>
</ref>
<ref id="B43">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wald</surname> <given-names>A.</given-names></name> <name><surname>Wolfowitz</surname> <given-names>J.</given-names></name></person-group> (<year>1948</year>). <article-title>Optimum character of the sequential probability ratio test</article-title>. <source>Ann. Math. Stat</source>. <volume>19</volume>, <fpage>326</fpage>&#x02013;<lpage>339</lpage>. doi: <pub-id pub-id-type="doi">10.1214/aoms/1177730197</pub-id></mixed-citation>
</ref>
<ref id="B44">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>W.</given-names></name> <name><surname>Wan</surname> <given-names>H.</given-names></name></person-group> (<year>2017</year>). <article-title>&#x0201C;Sequential probability ratio test for multiple-objective ranking and selection,&#x0201D;</article-title> in <source>2017 Winter Simulation Conference (WSC)</source> (<publisher-loc>Piscataway, NJ</publisher-loc>), <fpage>1998</fpage>&#x02013;<lpage>2009</lpage>. doi: <pub-id pub-id-type="doi">10.1109/WSC.2017.8247934</pub-id></mixed-citation>
</ref>
<ref id="B45">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Woods</surname> <given-names>W.</given-names></name> <name><surname>Delic</surname> <given-names>D. V.</given-names></name> <name><surname>Smith</surname> <given-names>B.</given-names></name> <name><surname>&#x0015A;wierkowski</surname> <given-names>L.</given-names></name> <name><surname>Day</surname> <given-names>G.</given-names></name> <name><surname>Devrelis</surname> <given-names>V.</given-names></name> <etal/></person-group>. (<year>2019</year>). <article-title>&#x0201C;Object detection and recognition using laser radars incorporating novel SPAD technology,&#x0201D;</article-title> in <source>Proc. SPIE 11005, Laser Radar Technology and Applications XXIV</source> (<publisher-loc>Bellingham, WA</publisher-loc>), <fpage>110050M</fpage>. doi: <pub-id pub-id-type="doi">10.1117/12.2517869</pub-id></mixed-citation>
</ref>
<ref id="B46">
<mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>Q.</given-names></name> <name><surname>Gao</surname> <given-names>Y.</given-names></name> <name><surname>Shen</surname> <given-names>J.</given-names></name> <name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Ran</surname> <given-names>X.</given-names></name> <name><surname>Tang</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2023</year>). <article-title>Enhancing adaptive history reserving by spiking convolutional block attention module in recurrent neural networks</article-title>. <source>Adv. Neural Inform. Process. Syst</source>. <volume>36</volume>, <fpage>58890</fpage>&#x02013;<lpage>58901</lpage>.</mixed-citation>
</ref>
<ref id="B47">
<mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>L.</given-names></name> <name><surname>Huang</surname> <given-names>Z.</given-names></name> <name><surname>Ding</surname> <given-names>J.</given-names></name> <name><surname>Yu</surname> <given-names>Z.</given-names></name></person-group> (<year>2025</year>). <article-title>&#x0201C;Ttfsformer: a ttfs-based lossless conversion of spiking transformer,&#x0201D;</article-title> in <source>Forty-second International Conference on Machine Learning</source> (<publisher-loc>Vancouver, BC</publisher-loc>), <fpage>77558</fpage>&#x02013;<lpage>77571</lpage>.</mixed-citation>
</ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/368959/overview">Lei Deng</ext-link>, Tsinghua University, China</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/751850/overview">Rong Yao</ext-link>, Taiyuan University of Technology, China</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3006571/overview">Jiangrong Shen</ext-link>, Xi&#x00027;an Jiaotong University, China</p>
</fn>
</fn-group>
</back>
</article>