<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="review-article" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Neurol.</journal-id>
<journal-title-group>
<journal-title>Frontiers in Neurology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Neurol.</abbrev-journal-title>
</journal-title-group>
<issn pub-type="epub">1664-2295</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fneur.2026.1774729</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Mini Review</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Artificial intelligence-driven clinical auxiliary diagnosis of benign paroxysmal positional vertigo</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Dai</surname>
<given-names>Siyang</given-names>
</name>
<xref ref-type="aff" rid="aff1"/>
<uri xlink:href="https://loop.frontiersin.org/people/3326692"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Wu</surname>
<given-names>Ying</given-names>
</name>
<xref ref-type="aff" rid="aff1"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kang</surname>
<given-names>Xiaocui</given-names>
</name>
<xref ref-type="aff" rid="aff1"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Shen</surname>
<given-names>Zuoting</given-names>
</name>
<xref ref-type="aff" rid="aff1"/>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Zhong</surname>
<given-names>Ping</given-names>
</name>
<xref ref-type="aff" rid="aff1"/>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/782318"/>
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role>
</contrib>
</contrib-group>
<aff id="aff1"><institution>Department of Neurology, Shidong Hospital Affiliated to University of Shanghai for Science and Technology</institution>, <city>Shanghai</city>, <country country="CN">China</country></aff>
<author-notes>
<corresp id="c001"><label>&#x002A;</label>Correspondence: Ping Zhong, <email xlink:href="mailto:zphgl@163.com">zphgl@163.com</email>; Zuoting Shen, <email xlink:href="mailto:gdzeroxx@sina.com">gdzeroxx@sina.com</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-17">
<day>17</day>
<month>02</month>
<year>2026</year>
</pub-date>
<pub-date publication-format="electronic" date-type="collection">
<year>2026</year>
</pub-date>
<volume>17</volume>
<elocation-id>1774729</elocation-id>
<history>
<date date-type="received">
<day>24</day>
<month>12</month>
<year>2025</year>
</date>
<date date-type="rev-recd">
<day>19</day>
<month>01</month>
<year>2026</year>
</date>
<date date-type="accepted">
<day>02</day>
<month>02</month>
<year>2026</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2026 Dai, Wu, Kang, Shen and Zhong.</copyright-statement>
<copyright-year>2026</copyright-year>
<copyright-holder>Dai, Wu, Kang, Shen and Zhong</copyright-holder>
<license>
<ali:license_ref start_date="2026-02-17">https://creativecommons.org/licenses/by/4.0/</ali:license_ref>
<license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p>
</license>
</permissions>
<abstract>
<p>Benign paroxysmal positional vertigo (BPPV) is one of the most prevalent peripheral vertigo disorders in clinical practice. Its definitive diagnosis relies heavily on characteristic nystagmus induced by positional provocative tests, which imposes high requirements on clinicians and is subject to limitations such as strong subjectivity. The breakthrough advances in artificial intelligence (AI) technologies have provided innovative solutions for the accurate diagnosis and personalized treatment of BPPV. This review systematically summarizes the research progress of AI in the clinical application of BPPV, its enormous potential to improve BPPV diagnostic efficacy, and future directions for development.</p>
</abstract>
<kwd-group>
<kwd>artificial intelligence</kwd>
<kwd>benign paroxysmal positional vertigo</kwd>
<kwd>deep learning</kwd>
<kwd>machine learning</kwd>
<kwd>nystagmus</kwd>
<kwd>vertigo</kwd>
</kwd-group>
<funding-group>
<funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement>
</funding-group>
<counts>
<fig-count count="2"/>
<table-count count="1"/>
<equation-count count="0"/>
<ref-count count="47"/>
<page-count count="8"/>
<word-count count="5801"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Artificial Intelligence in Neurology</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>Episodes of BPPV are mostly associated with specific changes in head position, with common precipitating factors including movements such as getting in and out of bed, turning over in bed, tilting the head backward, and bending it forward; its core clinical feature is paroxysmal and transient vertigo that typically lasts no more than 1&#x202F;min, and its pathogenic mechanism is directly linked to changes in the position of the head relative to the gravitational field. The widely accepted pathological mechanism of BPPV is that calcium carbonate otoconial crystals detach from the utricle of the inner ear, then float freely in the lumen of the semicircular canals or adhere to the wall of the ampulla, leading to abnormally increased sensitivity of the vestibular labyrinth to gravitational stimuli (<xref ref-type="bibr" rid="ref1">1</xref>). As the most prevalent peripheral vertigo in clinical practice, BPPV accounts for approximately 20&#x2013;30% of all patients with vertigo, with the peak age of onset occurring around 60&#x202F;years; in addition, this disorder has a relatively high recurrence tendency, with an annual recurrence rate of about 15&#x2013;20% (<xref ref-type="bibr" rid="ref2">2</xref>). In clinical diagnosis and treatment, the posterior semicircular canal is the most commonly affected site in BPPV cases, accounting for roughly 85% of all instances. The horizontal semicircular canal is involved in approximately 15% of cases, whereas involvement of the anterior semicircular canal is the rarest, with an incidence rate of only about 1% (<xref ref-type="bibr" rid="ref3 ref4 ref5">3&#x2013;5</xref>).</p>
<p>Most middle-aged and elderly patients have comorbid chronic conditions such as hypertension and diabetes mellitus, which complicates the diagnosis of BPPV; such misdiagnosis frequently leads to the implementation of unnecessary diagnostic procedures, patient referrals, and therapeutic interventions. If not diagnosed and treated promptly, BPPV can lead to a decline in quality of life and an increased risk of falls, which are the leading cause of hospitalization among the elderly due to injuries and trauma (<xref ref-type="bibr" rid="ref6">6</xref>).</p>
<p>In the traditional diagnostic and treatment model for BPPV, clinicians can identify the affected semicircular canal by having patients adopt different body positions (Dix-Hallpike test and Roll test), observing their eye movements, and asking whether they experience vertigo symptoms, after which targeted repositioning therapy is administered. However, this diagnostic and treatment pathway has certain limitations: the procedure is time-consuming and laborious for patients with obesity or cervical spine disorders; patients may be uncooperative and unable to keep their eyes open, and the approach places high demands on clinicians&#x2019; clinical experience and operational skills, rendering it highly susceptible to subjective factors. AI technology, therefore, offers a vital direction for innovation in this field (<xref ref-type="bibr" rid="ref7 ref8 ref9">7&#x2013;9</xref>). <xref ref-type="fig" rid="fig1">Figure 1</xref> shows a schematic diagram of the clinical workflows for different diagnostic and treatment methods.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption>
<p>Workflow diagrams of different methods.</p>
</caption>
<graphic xlink:href="fneur-17-1774729-g001.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">Flowchart illustrating three workflows for diagnosing and treating BPPV: traditional clinical workflow uses manual positional testing and observation, clinical workflow with deep learning integrates machine-performed tests and deep learning models, and machine learning data analysis workflow uses patient data to make BPPV-related predictions.</alt-text>
</graphic>
</fig>
<p>This review focuses on the application methods of AI in the auxiliary diagnosis of BPPV, systematically summarizes the relevant research progress, thoroughly discusses the specific challenges faced by the application of neural network technology in this field and the corresponding solutions, and analyzes the limitations of current research as well as future development directions.</p>
</sec>
<sec sec-type="methods" id="sec2">
<label>2</label>
<title>Methods</title>
<p>This systematic review was conducted in accordance with the PRISMA guidelines to ensure transparency, reproducibility, and methodological rigor. A literature search was performed across the PubMed, Web of Science, and Google Scholar databases for articles published between January 2015 and June 2025. The search strategy for this study centered on key terms: artificial intelligence, benign paroxysmal positional vertigo, nystagmus, and vertigo; these terms were combined via Boolean operators (AND, OR) to ensure comprehensive coverage of relevant literature.</p>
<p>Initial retrieval using this strategy yielded a total of 634 articles. After removing duplicate entries, 531 articles remained for further evaluation. A preliminary screening based on titles and abstracts excluded 472 articles, leaving 59 studies for full-text review. Following a detailed assessment, 35 additional articles were excluded on the basis of eligibility criteria. Ultimately, 24 articles that met all inclusion standards were included in the final analysis. The study selection process is illustrated in <xref ref-type="fig" rid="fig2">Figure 2</xref>, following the PRISMA flow diagram.</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption>
<p>PRISMA flow diagram.</p>
</caption>
<graphic xlink:href="fneur-17-1774729-g002.tif" mimetype="image" mime-subtype="tiff">
<alt-text content-type="machine-generated">PRISMA flow diagram shows the identification, screening, and inclusion of studies for a review. Records were identified from three databases, duplicates removed, with a final inclusion of twenty-four studies.</alt-text>
</graphic>
</fig>
</sec>
<sec id="sec3">
<label>3</label>
<title>Fundamentals of artificial intelligence technology</title>
<p>Machine learning is one of the core technologies of artificial intelligence, among which supervised learning and unsupervised learning are the most widely adopted. Supervised learning relies on labeled training data to learn the mapping relationship between inputs and outputs, enabling the prediction of new data, and is mostly applied to solve classification or regression problems. In contrast, unsupervised learning only uses unlabeled data to drive models to autonomously mine hidden structures and inherent patterns in data, and is usually employed for clustering or dimensionality reduction tasks (<xref ref-type="bibr" rid="ref10">10</xref>).</p>
<p>As an important branch of machine learning, deep learning takes multi-layer neural networks as its core architecture and can automatically extract high-dimensional features and identify complex data patterns from massive datasets (<xref ref-type="bibr" rid="ref11">11</xref>). Convolutional Neural Networks possess excellent autonomous learning and feature representation capabilities; after training, they can complete automatic feature extraction from raw input data without human supervision. By comparison, Recurrent Neural Networks leverage cyclic structural units to effectively process sequential data, capturing sequential dependencies in data by transmitting time-series information across different moments (<xref ref-type="bibr" rid="ref12">12</xref>, <xref ref-type="bibr" rid="ref13">13</xref>).</p>
<p>Machine learning provides basic support for data pattern mining through differentiated data processing modes, while deep learning enhances the advantages of complex feature extraction and sequential data processing via specific network structures. These technical characteristics lay a solid foundation for the application of artificial intelligence in scenarios such as feature analysis, mechanism exploration, and prognosis prediction for the clinical auxiliary diagnosis of BPPV (<xref ref-type="bibr" rid="ref14">14</xref>).</p>
</sec>
<sec id="sec4">
<label>4</label>
<title>Application of artificial intelligence in nystagmus analysis</title>
<sec id="sec5">
<label>4.1</label>
<title>Eye tracking</title>
<p>Nystagmus refers to the involuntary, rhythmic, reciprocating movement of the eyes when fixing gaze on a specific point. Based on the direction of this rhythmic oscillation, nystagmus can be classified into horizontal nystagmus, vertical nystagmus, and torsional nystagmus. In clinical diagnosis, the core diagnostic criterion for BPPV is the nystagmus manifestations observed during a patient&#x2019;s vertigo episode. The types of nystagmus induced by BPPV mainly include horizontal nystagmus and vertical upbeat nystagmus with a torsional component. Currently, the standard clinical tool for data acquisition is the head-mounted video oculography system. This device incorporates infrared cameras, infrared light-emitting diodes, and goggle assemblies to enable accurate capture of dynamic eye movement videos. In AI-assisted BPPV diagnostic research, the accurate localization of the ocular region serves as an indispensable core prerequisite and fundamental step.</p>
<p>Rodrigues et al. (<xref ref-type="bibr" rid="ref15">15</xref>) proposed the At-UNet neural network model, which adopts VGG16 as its backbone encoder. By integrating an attention module and a multi-task learning framework, the model achieves simultaneous and accurate segmentation of the pupillary region, yielding a Dice coefficient of 96.20% for pupil segmentation on the UTIRIS dataset. However, the model suffers from a large number of parameters, which hinders its lightweight deployment in clinical settings. Wei et al. (<xref ref-type="bibr" rid="ref16">16</xref>) combined the YOLOv5 object detection network with an improved DeepLabv3&#x202F;+&#x202F;segmentation module; precise pupillary coordinates were obtained via ellipse fitting of segmentation masks, with the final intersection over union (IoU) reaching 95.95%. While this approach is well-suited for real-time clinical requirements, the ellipse fitting method exhibits poor adaptability to pathologically irregular pupils and is thus prone to generating deviations.</p>
<p>Ideal acquisition of patients&#x2019; ocular information is often hindered by various interferences. Cho et al. (<xref ref-type="bibr" rid="ref17">17</xref>) developed a lightweight multi-task model that integrates a blink detection module into the pipeline of pupil localization and tracking, which specifically addresses invalid frames caused by eye blinking; the model achieved IoU values of 92.81 and 90.73% on the OpenEDS and HUSHH datasets, respectively. While well-suited for dynamic clinical scenarios and characterized by low deployment barriers, this multi-task architecture leads to slightly reduced precision in individual tasks, and its sensitivity to the recognition of rapid consecutive eye blinks requires further improvement. To evaluate the performance of pupil segmentation algorithms under different noise conditions, Ju-Hyuck et al. (<xref ref-type="bibr" rid="ref18">18</xref>) proposed a combined RANSAC+U-Net scheme. Results showed that the RANSAC+U-Net combined algorithm performed optimally in the noise-free scenario with a mean squared error (MSE) of 0.0620; the standalone U-Net algorithm excelled in the optical noise scenario with an MSE of 0.0694; and the standalone RANSAC algorithm yielded the best performance in the motion blur noise scenario with an MSE of 0.0717. However, all algorithms exhibited poor performance in the presence of human-induced noise, such as occlusion by eyelids and eyelashes. The core underlying reason is the lack of critical image features, coupled with the absence of effective feature completion mechanisms in existing models, which renders them unable to accommodate the variability of complex physiological structures.</p>
<p>Choi et al. (<xref ref-type="bibr" rid="ref19">19</xref>) proposed an automatic eyeglass removal method based on the CycleGAN network. The primary objective of a generative adversarial network is to use a discriminator to calculate the distribution of original samples, while a generator works to generate new samples from real data samples (<xref ref-type="bibr" rid="ref20">20</xref>). This method is applied to supplement the key information of the ocular region that is lost due to the presence of eyeglasses. In future research, we can also explore solutions to problems such as data loss and noise interference in pupil localization by leveraging generative adversarial networks.</p>
<p>In summary, compared with the iris, the pupil offers the advantages of high stability, strong anti-interference capability, and low algorithm complexity, making it more suitable for meeting the technical requirements of nystagmus video analysis and aligning with the core clinical diagnostic demands of speed, real-time performance, and anti-interference capability. Although artificial intelligence is currently capable of pupil localization and tracking, there remains significant room for optimization in numerous aspects.</p>
</sec>
<sec id="sec6">
<label>4.2</label>
<title>Application of ocular movement trajectory</title>
<p>The ocular movement trajectory plot converts subjectively observable nystagmus into objectively quantifiable trajectory curves and data, thus avoiding deviations caused by manual judgment. To address the challenge of accurate pupil tracking, Lee et al. (<xref ref-type="bibr" rid="ref21">21</xref>) developed ANyEye, an AI-assisted nystagmus video analysis system that integrates a compensation algorithm to correct pupil positions, achieving a detection rate of 91.26% for pupil tracking within a 5-pixel error margin, making it well-suited for dynamic clinical tracking applications. However, its adaptability to high-velocity nystagmus scenarios was not reported in the study. Deng et al. (<xref ref-type="bibr" rid="ref22">22</xref>) proposed the lower pole of pupil algorithm and employed ResNet34 for classifying four common subtypes, with the accuracy rate reaching 95.55%. Regarding the issue of data loss caused by pupil occlusion due to various factors, Mun et al. (<xref ref-type="bibr" rid="ref23">23</xref>) pointed out that linear interpolation, if adopted as a missing data bridging algorithm, might inadvertently generate nystagmus-like motion artifacts; filling missing values with the pupil position detected at the previous moment (denoted as NA) yields better results. Ultimately, the CNN1D model was used, achieving an accuracy rate of 91.02%. This approach provides a quantitative reference for missing data handling, though its effectiveness in scenarios involving prolonged pupil occlusion remains to be verified.</p>
<p>Due to the limited information that can be conveyed by a single trajectory plot, many researchers have begun to explore converting trajectory plots into other forms of information for nystagmus identification. Dogru et al. (<xref ref-type="bibr" rid="ref24">24</xref>) transformed the original trajectories into polar coordinates and calculated angular changes via template matching, successfully addressing the challenge of torsional nystagmus detection. Qiu et al. (<xref ref-type="bibr" rid="ref25">25</xref>) completed classification after converting trajectories into Gram matrix feature images, achieving a Top-1 accuracy of 85.47%. Lee et al. (<xref ref-type="bibr" rid="ref26">26</xref>) utilized the wavelet transform to convert time-series signals into time-frequency images, which were ultimately fed into the EfficientNet convolutional neural network for classification, yielding an overall accuracy of 87%. Although different data conversion strategies can improve the accuracy of nystagmus identification from specific dimensions, such transformation processes are often associated with several limitations. These drawbacks include the easy loss of temporal information during feature mapping and the lack of unified criteria for wavelet transform parameter settings, which exert a notable impact on the final results.</p>
<p>In addition, the adoption of a multimodal approach that incorporates more clinical information for analysis can improve diagnostic accuracy. Wu et al. (<xref ref-type="bibr" rid="ref27">27</xref>) converted eight features, including head trajectory, eye movement trajectory, and their corresponding slow-phase velocity values, into 1D data as input. Nguyen et al. (<xref ref-type="bibr" rid="ref28">28</xref>) fused five-channel time-series data consisting of horizontal eye movement, vertical eye movement, pupil radius, horizontal velocity, and vertical velocity. Going beyond nystagmus-related information alone, Liu et al. (<xref ref-type="bibr" rid="ref29">29</xref>) adopted a multi-technology fusion strategy combining image features and signal analysis to conduct comprehensive nystagmus detection. Although such multimodal fusion methods can significantly enhance the generalization ability of models in clinical settings, they inevitably increase model complexity, thereby raising the bar for clinical deployment and implementation.</p>
<p>In clinical practice, physicians still rely primarily on the direct interpretation of eye movement images as the main diagnostic basis, and ocular movement trajectory plots have not yet become routine core diagnostic tools. Notably, current intelligent classification research on BPPV based on ocular movement trajectories has demonstrated significant clinical effectiveness and promising application prospects.</p>
</sec>
<sec id="sec7">
<label>4.3</label>
<title>Application of nystagmus videos</title>
<p>With the gradual emergence of intelligent video analysis technology as a research hotspot in the field, relevant research directions have also begun to focus on nystagmus video analysis, which serves as the core carrier for clinical diagnosis. Li et al. (<xref ref-type="bibr" rid="ref30">30</xref>, <xref ref-type="bibr" rid="ref31">31</xref>) designed different deep learning algorithms integrating multiple modules for vertical nystagmus and torsional nystagmus, achieving an accuracy of 91 and 96.1%, respectively. However, the adaptability of these algorithms to complex clinical scenarios in practical experiments remains to be verified. Lim et al. (<xref ref-type="bibr" rid="ref32">32</xref>) developed a 2D-CNN model that converts the 3D eye movement features in videos into grid images for classification. Results showed that the area under the curve (AUC) for horizontal nystagmus and vertical nystagmus reached 0.966 and 0.952, respectively, while the AUC for torsional nystagmus was only 0.853. The main limitation lies in the fact that the identification of torsional nystagmus relies on the accurate capture of iris rotation states. In clinical infrared videos, low brightness and contrast often blur iris textures, impeding feature extraction.</p>
<p>To address the problem of limited recognition accuracy for torsional nystagmus, researchers have introduced optical flow technology. The core principle of optical flow is to estimate pixel displacement between consecutive video frames for accurate capture of motion dynamics (<xref ref-type="bibr" rid="ref33">33</xref>). Kong et al. (<xref ref-type="bibr" rid="ref34">34</xref>) used LiteFlowNet to extract optical flow features, which were then fused and classified via the nystagmus video classification network based on temporal modeling. This method achieved an F1-score of 0.98 for torsional nystagmus, surpassing the 0.928 score obtained for non-torsional nystagmus. Zhang et al. (<xref ref-type="bibr" rid="ref35">35</xref>) proposed a Torsion-aware Bi-Stream Identification Network, which inputs optical flow in the x and y directions into the two-stream network for torsional nystagmus recognition, reaching an accuracy of 85.73% in clinical evaluations. Model designs incorporating optical flow fields are more compatible with the characteristics of clinical videos, effectively resolving the recognition challenges caused by blurred iris textures. Nevertheless, optical flow feature extraction imposes certain computational requirements, which may increase the costs associated with clinical deployment.</p>
<p>In addition to optical flow features, conducting multimodal fusion research that incorporates the multidimensional clinical characteristics of vertigo associated with BPPV can also effectively improve the diagnostic accuracy of the model. Lu et al. (<xref ref-type="bibr" rid="ref36">36</xref>) encoded head position vectors using an autoencoder to capture spatial information, and fused the encoded information with video features via a cross-attention mechanism, achieving an average accuracy of 81.7%. While this approach enabled the synergistic utilization of head posture and eye movement information, it suffered from the drawback of high computational complexity during the feature fusion process. Pham et al. (<xref ref-type="bibr" rid="ref37">37</xref>) developed a hybrid deep learning system named &#x201C;Look and Diagnose&#x201D;, which integrates body posture and binocular vision information. The system first detects posterior semicircular canal BPPV and then classifies non-posterior semicircular canal otolithiasis, with an overall classification accuracy of 91% and demonstrating strong alignment with clinical diagnostic workflows. <xref ref-type="table" rid="tab1">Table 1</xref> systematically presents the research on deep learning related to nystagmus video analysis. These studies indicate that deep learning has currently achieved favorable results in the clinical image analysis of BPPV and can provide references for clinical practice. Nevertheless, continuous optimizations are still required in terms of computational cost control, generalization capability in complex scenarios, and adaptability to clinical workflows.</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption>
<p>Summary of deep learning research related to nystagmus analysis.</p>
</caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Researchers</th>
<th align="left" valign="top">Data set</th>
<th align="left" valign="top">Model</th>
<th align="left" valign="top">Performance</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="middle">Rodrigues et al. (2024) (<xref ref-type="bibr" rid="ref15">15</xref>)</td>
<td align="left" valign="middle">433 videos</td>
<td align="left" valign="middle">At-UNet+Attention</td>
<td align="left" valign="middle">Dice similarity coefficient: 96.2%</td>
</tr>
<tr>
<td align="left" valign="middle">Wei et al. (2022) (<xref ref-type="bibr" rid="ref16">16</xref>)</td>
<td align="left" valign="middle">TEyeD</td>
<td align="left" valign="middle">YOLOv5-DeepLabv3+</td>
<td align="left" valign="middle">IoU: 95.95%</td>
</tr>
<tr>
<td align="left" valign="middle">Cho et al. (2024) (<xref ref-type="bibr" rid="ref17">17</xref>)</td>
<td align="left" valign="middle">HUSHH+OpenEDS</td>
<td align="left" valign="middle">Lightweight model</td>
<td align="left" valign="middle">IoU: 90.73% (HUSHH), 92.81% (OpenEDS)</td>
</tr>
<tr>
<td align="left" valign="middle">Ju-Hyuck et al. (2025) (<xref ref-type="bibr" rid="ref18">18</xref>)</td>
<td align="left" valign="middle">CASIA-Iris-Degradation</td>
<td align="left" valign="middle">RANSAC+U-Net</td>
<td align="left" valign="middle">MSE: 0.0620</td>
</tr>
<tr>
<td align="left" valign="middle">Lee et al. (2023) (<xref ref-type="bibr" rid="ref21">21</xref>)</td>
<td align="left" valign="middle">52 patients</td>
<td align="left" valign="middle">ANyEye</td>
<td align="left" valign="middle">Detection rate at 5-pixel error: 91.26%</td>
</tr>
<tr>
<td align="left" valign="middle">Deng et al. (2023) (<xref ref-type="bibr" rid="ref22">22</xref>)</td>
<td align="left" valign="middle">433 nystagmus videos</td>
<td align="left" valign="middle">ResNet34</td>
<td align="left" valign="middle">Accuracy: 95.55%</td>
</tr>
<tr>
<td align="left" valign="middle">Mun et al. (2024) (<xref ref-type="bibr" rid="ref23">23</xref>)</td>
<td align="left" valign="middle">828 patients</td>
<td align="left" valign="middle">2D U-Net&#x202F;+&#x202F;CNN1D</td>
<td align="left" valign="middle">Accuracy: 91.02&#x202F;&#x00B1;&#x202F;0.66%</td>
</tr>
<tr>
<td align="left" valign="middle">Qiu et al. (2023) (<xref ref-type="bibr" rid="ref25">25</xref>)</td>
<td align="left" valign="middle">646 VNG videos</td>
<td align="left" valign="middle">Gram-AODE</td>
<td align="left" valign="middle">Top-1 accuracy: 85.47%</td>
</tr>
<tr>
<td align="left" valign="middle">Lee et al. (2024) (<xref ref-type="bibr" rid="ref26">26</xref>)</td>
<td align="left" valign="middle">947 VNG videos</td>
<td align="left" valign="middle">EfficientNet</td>
<td align="left" valign="middle">Accuracy: 87%</td>
</tr>
<tr>
<td align="left" valign="middle">Wu et al. (2023) (<xref ref-type="bibr" rid="ref27">27</xref>)</td>
<td align="left" valign="middle">3,296 patients</td>
<td align="left" valign="middle">1DCNN-BiLSTM-Self-attention</td>
<td align="left" valign="middle">Accuracy: 93.3&#x202F;&#x00B1;&#x202F;1.0%</td>
</tr>
<tr>
<td align="left" valign="middle">Nguyen et al. (2025) (<xref ref-type="bibr" rid="ref28">28</xref>)</td>
<td align="left" valign="middle">LAD</td>
<td align="left" valign="middle">EfficientNet-B0&#x202F;+&#x202F;1D CNN</td>
<td align="left" valign="middle">Accuracy: 91%</td>
</tr>
<tr>
<td align="left" valign="middle">Liu et al. (2025) (<xref ref-type="bibr" rid="ref29">29</xref>)</td>
<td align="left" valign="middle">60 patients</td>
<td align="left" valign="middle">Egeunet</td>
<td align="left" valign="middle">Accuracy: 93.33%</td>
</tr>
<tr>
<td align="left" valign="middle">Li et al. (2023) (<xref ref-type="bibr" rid="ref30">30</xref>)</td>
<td align="left" valign="middle">21,743 videos</td>
<td align="left" valign="middle">BiLSTM-GRU module</td>
<td align="left" valign="middle">Accuracy: 91%</td>
</tr>
<tr>
<td align="left" valign="middle">Li et al. (2023) (<xref ref-type="bibr" rid="ref31">31</xref>)</td>
<td align="left" valign="middle">24,521 videos</td>
<td align="left" valign="middle">Inception+BiLSTM</td>
<td align="left" valign="middle">Accuracy: 96.1%</td>
</tr>
<tr>
<td align="left" valign="middle">Lim et al. (2019) (<xref ref-type="bibr" rid="ref32">32</xref>)</td>
<td align="left" valign="middle">91,778 videos</td>
<td align="left" valign="middle">2D-CNN</td>
<td align="left" valign="middle">F1-score: 0.794&#x202F;&#x00B1;&#x202F;0.008</td>
</tr>
<tr>
<td align="left" valign="middle">Kong et al. (2023) (<xref ref-type="bibr" rid="ref34">34</xref>)</td>
<td align="left" valign="middle">728 videos</td>
<td align="left" valign="middle">ConvNeXt+LSTM+Optical flow</td>
<td align="left" valign="middle">F1-score: Non-torsional nystagmus: 0.928, Torsional nystagmus: 0.98</td>
</tr>
<tr>
<td align="left" valign="middle">Zhang et al. (2021) (<xref ref-type="bibr" rid="ref35">35</xref>)</td>
<td align="left" valign="middle">77 videos</td>
<td align="left" valign="middle">TSBIN</td>
<td align="left" valign="middle">Accuracy: 85.73%</td>
</tr>
<tr>
<td align="left" valign="middle">Lu et al. (2024) (<xref ref-type="bibr" rid="ref36">36</xref>)</td>
<td align="left" valign="middle">518 patients</td>
<td align="left" valign="middle">BKTDN+autoencoder+cross-attention</td>
<td align="left" valign="middle">Accuracy: 81.7%</td>
</tr>
<tr>
<td align="left" valign="middle">Pham et al. (2022) (<xref ref-type="bibr" rid="ref37">37</xref>)</td>
<td align="left" valign="middle">Data from 746 patients</td>
<td align="left" valign="middle">&#x201C;Look and Diagnose&#x201D;</td>
<td align="left" valign="middle">Accuracy: 91%</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="sec8">
<label>5</label>
<title>Application of artificial intelligence in clinical practice of BPPV</title>
<sec id="sec9">
<label>5.1</label>
<title>Diagnosis of BPPV</title>
<p>In clinical practice, physicians usually make a preliminary diagnosis of the disease by inquiring about the characteristics, triggers, and course of vertigo episodes, as well as the presence of accompanying symptoms or medical history. Accurately differentiating BPPV from other vestibular disorders requires a certain level of clinical experience on the part of physicians, which results in relatively high rates of missed diagnosis and misdiagnosis of BPPV in clinical practice.</p>
<p>In a machine learning study involving 7,660 patients, Khani et al. (<xref ref-type="bibr" rid="ref38">38</xref>) allocated BPPV patients and non-BPPV controls at a ratio of 1:1. After preprocessing the demographic characteristics and clinical history features, they adopted multiple machine learning models to predict BPPV. The results showed that the gradient boosting model exhibited the best performance, with an accuracy rate of 85.422%. Compared with the approximately 70% misdiagnosis rate of peripheral vestibular disorders in emergency departments (<xref ref-type="bibr" rid="ref39">39</xref>), this method achieved a significant improvement. Han et al. (<xref ref-type="bibr" rid="ref40">40</xref>) established a multivariate logistic regression prediction model based on the clinical data and biomarkers of 522 patients, which achieved an AUC of 0.927.</p>
<p>Soylemez et al. (<xref ref-type="bibr" rid="ref41">41</xref>) analyzed 280 patients with posterior semicircular canal BPPV. The results indicated that age, symptom onset time, symptom duration, dizziness type, triggering factors, and auditory symptom status were significant features. Using a random forest model, the diagnostic accuracy for posterior semicircular canal BPPV reached 96.43%.</p>
</sec>
<sec id="sec10">
<label>5.2</label>
<title>Pathophysiological mechanism of BPPV</title>
<p>To date, the pathophysiological mechanism of BPPV has not been fully elucidated. Previous studies have suggested that it may be associated with factors such as hypertension and vitamin D deficiency (<xref ref-type="bibr" rid="ref42">42</xref>). Based on a large-sample systematic analysis, Han et al. (<xref ref-type="bibr" rid="ref40">40</xref>) used laboratory biomarker information as the core input&#x2014;including routine blood test parameters, inflammatory and metabolic indicators, etc. The results indicated that disease course, neutrophil count, lymphocyte count, C-reactive protein, ferritin levels, and vitamin D deficiency were identified as independent risk factors for BPPV, while monocyte count was found to be a protective factor. These findings further suggest that inflammatory responses, iron metabolism disorders, and vitamin D deficiency may contribute to the development of BPPV.</p>
</sec>
<sec id="sec11">
<label>5.3</label>
<title>Prognostic prediction of BPPV</title>
<p>There are significant individual differences in the clinical phenotypes and treatment outcomes of BPPV. Clinical data indicate that the proportion of patients requiring multiple canalith repositioning procedures can reach 36.6% (<xref ref-type="bibr" rid="ref43">43</xref>). To accurately identify populations at high risk of multiple repositioning treatments and optimize clinical intervention strategies, Baydan-Aran et al. (<xref ref-type="bibr" rid="ref44">44</xref>) retrospectively enrolled 520 BPPV patients. They selected clinical baseline data as input features, including age, gender, BPPV subtypes, and comorbidities (such as hypertension, diabetes mellitus, cervical disorders, and hearing loss). Nine machine learning prediction models were constructed and compared. The results showed that the gradient boosting machine model exhibited the optimal performance with an AUC of 0.788. Further analysis revealed that age, hypertension, and hearing loss were key influencing factors for multiple repositioning treatments, among which hypertension exerted the most significant impact. This study provides a quantitative tool for the clinical identification of patients at risk of requiring multiple repositioning procedures.</p>
</sec>
</sec>
<sec sec-type="discussion" id="sec12">
<label>6</label>
<title>Discussion</title>
<p>As a common benign vertigo disorder, BPPV has relatively straightforward treatment methods. However, its clinical manifestations are easily confused with other vertigo-related conditions such as Meniere&#x2019;s disease and vestibular neuritis, resulting in persistently high rates of missed diagnosis and misdiagnosis. Moreover, recurrent episodes can severely affect patients&#x2019; quality of daily life, thus deserving greater attention. AI technology has emerged as a core driving force for advancing the precise diagnosis and treatment of BPPV, demonstrating tremendous potential both as an adjunct in clinical diagnostic workflows and in predicting the prognosis of repositioning therapy outcomes. Nevertheless, several issues and challenges remain regarding the practical application of AI in clinical settings. In many previous studies, researchers prioritized data uniformity by discarding non-compliant or substandard data samples, which led to information loss and limited the generalization ability of the developed models. Oth&#x00E9;guy et al. (<xref ref-type="bibr" rid="ref45">45</xref>) developed an eye-tracking system based on scleral contact lenses. This system consists of electronic components and a camera integrated into a pair of glasses, which is remotely powered by two vertical-cavity surface-emitting lasers embedded in the scleral lenses. The device can also be safely used to monitor eye movements even when the eyelids are closed, but it is accompanied by problems related to device wearing comfort, safety, and hygiene. In future research, developing a convenient and safe method for detecting eye movements under interference will be a major challenge and key research direction.</p>
<p>In addition, to gain recognition from both physicians and patients in clinical practice, the interpretability and processing speed of AI (<xref ref-type="bibr" rid="ref46">46</xref>), as well as how to balance these three aspects, are issues that need to be addressed. Standardization and popularization of AI-assisted diagnosis constitute a crucial next step for its broader clinical integration (<xref ref-type="bibr" rid="ref47">47</xref>). With the improvement of research in various fields in the future, AI will be able to provide more comprehensive auxiliary diagnosis and treatment schemes for BPPV, enhancing both efficiency and accuracy.</p>
</sec>
</body>
<back>
<sec sec-type="author-contributions" id="sec13">
<title>Author contributions</title>
<p>SD: Writing &#x2013; original draft. YW: Conceptualization, Methodology, Project administration, Writing &#x2013; review &#x0026; editing. XK: Conceptualization, Methodology, Writing &#x2013; review &#x0026; editing. ZS: Writing &#x2013; review &#x0026; editing. PZ: Writing &#x2013; review &#x0026; editing.</p>
</sec>
<sec sec-type="COI-statement" id="sec14">
<title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="ai-statement" id="sec15">
<title>Generative AI statement</title>
<p>The author(s) declared that Generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec sec-type="disclaimer" id="sec16">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="ref1"><label>1.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Imai</surname><given-names>T</given-names></name> <name><surname>Takeda</surname><given-names>N</given-names></name> <name><surname>Ikezono</surname><given-names>T</given-names></name> <name><surname>Shigeno</surname><given-names>K</given-names></name> <name><surname>Asai</surname><given-names>M</given-names></name> <name><surname>Watanabe</surname><given-names>Y</given-names></name> <etal/></person-group>. <article-title>Classification, diagnostic criteria and management of benign paroxysmal positional vertigo</article-title>. <source>Auris Nasus Larynx</source>. (<year>2017</year>) <volume>44</volume>:<fpage>1</fpage>&#x2013;<lpage>6</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.anl.2016.03.013</pub-id>, <pub-id pub-id-type="pmid">27174206</pub-id></mixed-citation></ref>
<ref id="ref2"><label>2.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname><given-names>J-S</given-names></name> <name><surname>Zee</surname><given-names>DS</given-names></name></person-group>. <article-title>Benign paroxysmal positional vertigo</article-title>. <source>N Engl J Med</source>. (<year>2014</year>) <volume>370</volume>:<fpage>1138</fpage>&#x2013;<lpage>47</lpage>. doi: <pub-id pub-id-type="doi">10.1056/NEJMcp1309481</pub-id>, <pub-id pub-id-type="pmid">24645946</pub-id></mixed-citation></ref>
<ref id="ref3"><label>3.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname><given-names>H-J</given-names></name> <name><surname>Lee</surname><given-names>J-O</given-names></name> <name><surname>Choi</surname><given-names>J-Y</given-names></name> <name><surname>Kim</surname><given-names>J-S</given-names></name></person-group>. <article-title>Etiologic distribution of dizziness and vertigo in a referral-based dizziness clinic in South Korea</article-title>. <source>J Neurol</source>. (<year>2020</year>) <volume>267</volume>:<fpage>2252</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00415-020-09831-2</pub-id></mixed-citation></ref>
<ref id="ref4"><label>4.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname><given-names>H-J</given-names></name> <name><surname>Park</surname><given-names>J</given-names></name> <name><surname>Kim</surname><given-names>J-S</given-names></name></person-group>. <article-title>Update on benign paroxysmal positional vertigo</article-title>. <source>J Neurol</source>. (<year>2021</year>) <volume>268</volume>:<fpage>1995</fpage>&#x2013;<lpage>2000</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00415-020-10314-7</pub-id>, <pub-id pub-id-type="pmid">33231724</pub-id></mixed-citation></ref>
<ref id="ref5"><label>5.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname><given-names>Y-L</given-names></name> <name><surname>Wu</surname><given-names>M-Y</given-names></name> <name><surname>Cheng</surname><given-names>P-L</given-names></name> <name><surname>Pei</surname><given-names>S-F</given-names></name> <name><surname>Liu</surname><given-names>Y</given-names></name> <name><surname>Liu</surname><given-names>Y-M</given-names></name></person-group>. <article-title>Analysis of cost and effectiveness of treatment in benign paroxysmal positional vertigo</article-title>. <source>Chin Med J</source>. (<year>2019</year>) <volume>132</volume>:<fpage>342</fpage>&#x2013;<lpage>5</lpage>. doi: <pub-id pub-id-type="doi">10.1097/CM9.0000000000000063</pub-id>, <pub-id pub-id-type="pmid">30681502</pub-id></mixed-citation></ref>
<ref id="ref6"><label>6.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chua</surname><given-names>K</given-names></name> <name><surname>Gans</surname><given-names>RE</given-names></name> <name><surname>Spinks</surname><given-names>S</given-names></name></person-group>. <article-title>Demographic and clinical characteristics of BPPV patients: a retrospective large cohort study of 1599 patients</article-title>. <source>J Otolaryngol ENT Res</source>. (<year>2020</year>) <volume>12</volume>:<fpage>20</fpage>&#x2013;<lpage>30</lpage>. doi: <pub-id pub-id-type="doi">10.15406/joentr.2020.12.00451</pub-id></mixed-citation></ref>
<ref id="ref7"><label>7.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname><given-names>F</given-names></name> <name><surname>Casalino</surname><given-names>LP</given-names></name> <name><surname>Khullar</surname><given-names>D</given-names></name></person-group>. <article-title>Deep learning in medicine&#x2014;promise, progress, and challenges</article-title>. <source>JAMA Intern Med</source>. (<year>2019</year>) <volume>179</volume>:<fpage>293</fpage>. doi: <pub-id pub-id-type="doi">10.1001/jamainternmed.2018.7117</pub-id>, <pub-id pub-id-type="pmid">30556825</pub-id></mixed-citation></ref>
<ref id="ref8"><label>8.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yang</surname><given-names>S</given-names></name> <name><surname>Zhu</surname><given-names>F</given-names></name> <name><surname>Ling</surname><given-names>X</given-names></name> <name><surname>Liu</surname><given-names>Q</given-names></name> <name><surname>Zhao</surname><given-names>P</given-names></name></person-group>. <article-title>Intelligent health care: applications of deep learning in computational medicine</article-title>. <source>Front Genet</source>. (<year>2021</year>) <volume>12</volume>:<fpage>607471</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fgene.2021.607471</pub-id>, <pub-id pub-id-type="pmid">33912213</pub-id></mixed-citation></ref>
<ref id="ref9"><label>9.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Esteva</surname><given-names>A</given-names></name> <name><surname>Chou</surname><given-names>K</given-names></name> <name><surname>Yeung</surname><given-names>S</given-names></name> <name><surname>Naik</surname><given-names>N</given-names></name> <name><surname>Madani</surname><given-names>A</given-names></name> <name><surname>Mottaghi</surname><given-names>A</given-names></name> <etal/></person-group>. <article-title>Deep learning-enabled medical computer vision</article-title>. <source>NPJ Digit Med</source>. (<year>2021</year>) <volume>4</volume>:<fpage>5</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41746-020-00376-2</pub-id>, <pub-id pub-id-type="pmid">33420381</pub-id></mixed-citation></ref>
<ref id="ref10"><label>10.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname><given-names>C</given-names></name> <name><surname>Young</surname><given-names>AS</given-names></name> <name><surname>Raj</surname><given-names>C</given-names></name> <name><surname>Bradshaw</surname><given-names>AP</given-names></name> <name><surname>Nham</surname><given-names>B</given-names></name> <name><surname>Rosengren</surname><given-names>SM</given-names></name> <etal/></person-group>. <article-title>Machine learning models help differentiate between causes of recurrent spontaneous vertigo</article-title>. <source>J Neurol</source>. (<year>2024</year>) <volume>271</volume>:<fpage>3426</fpage>&#x2013;<lpage>38</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00415-023-11997-4</pub-id></mixed-citation></ref>
<ref id="ref11"><label>11.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Ongsulee</surname><given-names>P.</given-names></name></person-group> <article-title>Artificial intelligence, machine learning and deep learning.</article-title> <source>2017 15th International Conference on ICT and Knowledge Engineering (ICT&#x0026;KE)</source>. <publisher-loc>Bangkok</publisher-loc>: <publisher-name>IEEE</publisher-name> (<year>2017</year>). <fpage>1</fpage>&#x2013;<lpage>6</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICTKE.2017.8259629</pub-id></mixed-citation></ref>
<ref id="ref12"><label>12.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname><given-names>X</given-names></name> <name><surname>Wang</surname><given-names>L</given-names></name> <name><surname>Zhang</surname><given-names>Y</given-names></name> <name><surname>Han</surname><given-names>X</given-names></name> <name><surname>Deveci</surname><given-names>M</given-names></name> <name><surname>Parmar</surname><given-names>M</given-names></name></person-group>. <article-title>A review of convolutional neural networks in computer vision</article-title>. <source>Artif Intell Rev</source>. (<year>2024</year>) <volume>57</volume>:<fpage>99</fpage>. doi: <pub-id pub-id-type="doi">10.1007/s10462-024-10721-6</pub-id></mixed-citation></ref>
<ref id="ref13"><label>13.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Shiri</surname><given-names>FM</given-names></name> <name><surname>Perumal</surname><given-names>T</given-names></name> <name><surname>Mustapha</surname><given-names>N</given-names></name> <name><surname>Mohamed</surname><given-names>R</given-names></name></person-group>. <article-title>A comprehensive overview and comparative analysis on deep learning models: CNN, RNN, LSTM, GRU</article-title>. <source>arXiv</source> (<year>2023</year>)[Preprint]. Available at: <ext-link xlink:href="https://arxiv.org/abs/2305.17473" ext-link-type="uri">https://arxiv.org/abs/2305.17473</ext-link> (Accessed July 11, 2025).</mixed-citation></ref>
<ref id="ref14"><label>14.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Alzubaidi</surname><given-names>L</given-names></name> <name><surname>Zhang</surname><given-names>J</given-names></name> <name><surname>Humaidi</surname><given-names>AJ</given-names></name> <name><surname>Al-Dujaili</surname><given-names>A</given-names></name> <name><surname>Duan</surname><given-names>Y</given-names></name> <name><surname>Al-Shamma</surname><given-names>O</given-names></name> <etal/></person-group>. <article-title>Review of deep learning: concepts, CNN architectures, challenges, applications, future directions</article-title>. <source>J Big Data</source>. (<year>2021</year>) <volume>8</volume>:<fpage>53</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s40537-021-00444-8</pub-id>, <pub-id pub-id-type="pmid">33816053</pub-id></mixed-citation></ref>
<ref id="ref15"><label>15.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rodrigues</surname><given-names>FC</given-names></name> <name><surname>Quintanilha</surname><given-names>DBP</given-names></name> <name><surname>De Paiva</surname><given-names>AC</given-names></name> <name><surname>Silva</surname><given-names>AC</given-names></name> <name><surname>De Almeida</surname><given-names>JSD</given-names></name> <name><surname>Braz</surname><given-names>G</given-names></name></person-group>. <article-title>Deep learning for detecting dilated or contracted pupils</article-title>. <source>Biomed Signal Process Control</source>. (<year>2024</year>) <volume>94</volume>:<fpage>106360</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bspc.2024.106360</pub-id></mixed-citation></ref>
<ref id="ref16"><label>16.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Wei</surname><given-names>K</given-names></name> <name><surname>Yang</surname><given-names>Q</given-names></name> <name><surname>Yang</surname><given-names>X</given-names></name> <name><surname>Liu</surname><given-names>Z</given-names></name></person-group>. "<article-title>Application of a pupil tracking method based on Yolov5-Deeplabv3+ fusion network on a new BPPV nystagmus recorder</article-title>" In: <person-group person-group-type="editor"><name><surname>El-Hashash</surname><given-names>A</given-names></name></person-group>, editor. <source>International conference on biomedical and intelligent systems (IC-BIS 2022)</source>. <publisher-loc>Chengdu</publisher-loc>: <publisher-name>SPIE</publisher-name> (<year>2022</year>). <fpage>120</fpage>.</mixed-citation></ref>
<ref id="ref17"><label>17.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Cho</surname><given-names>C</given-names></name> <name><surname>Park</surname><given-names>S</given-names></name> <name><surname>Ma</surname><given-names>S</given-names></name> <name><surname>Lee</surname><given-names>H-J</given-names></name> <name><surname>Lim</surname><given-names>E-C</given-names></name> <name><surname>Hong</surname><given-names>SK</given-names></name></person-group>. <article-title>Feasibility of video-based real-time nystagmus tracking: a lightweight deep learning model approach using ocular object segmentation</article-title>. <source>Front Neurol</source>. (<year>2024</year>) <volume>15</volume>:<fpage>1342108</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fneur.2024.1342108</pub-id>, <pub-id pub-id-type="pmid">38450068</pub-id></mixed-citation></ref>
<ref id="ref18"><label>18.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Ju-Hyuck</surname><given-names>H</given-names></name> <name><surname>Hea-In</surname><given-names>L</given-names></name> <name><surname>Dong-Kwon</surname><given-names>J</given-names></name> <name><surname>Han-Jo</surname><given-names>L</given-names></name> <name><surname>Woong-Sik</surname><given-names>K</given-names></name></person-group> <article-title>Noise-resilient pupil segmentation in VNG using RANSAC-enhanced U-net for improved BPPV diagnosis</article-title>. <conf-name>2025 International Conference on Electronics, Information, and Communication (ICEIC)</conf-name>. <publisher-loc>Osaka</publisher-loc>: <publisher-name>IEEE</publisher-name> (<year>2025</year>). <fpage>1</fpage>&#x2013;<lpage>4</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICEIC64972.2025.10879684</pub-id></mixed-citation></ref>
<ref id="ref19"><label>19.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Choi</surname><given-names>JH</given-names></name> <name><surname>Lee</surname><given-names>KI</given-names></name> <name><surname>Song</surname><given-names>BC</given-names></name></person-group>. <article-title>Eye pupil localization algorithm using convolutional neural networks</article-title>. <source>Multimed Tools Appl</source>. (<year>2020</year>) <volume>79</volume>:<fpage>32563</fpage>&#x2013;<lpage>74</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s11042-020-09711-x</pub-id></mixed-citation></ref>
<ref id="ref20"><label>20.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Karthika</surname><given-names>S</given-names></name> <name><surname>Durgadevi</surname><given-names>M</given-names></name></person-group>. <article-title>Generative adversarial network (GAN): a general review on different variants of GAN and applications</article-title>. <conf-name>2021 6th International Conference on Communication and Electronics Systems (ICCES)</conf-name>. <publisher-loc>Coimbatre</publisher-loc>: <publisher-name>IEEE</publisher-name> (<year>2021</year>). <fpage>1</fpage>&#x2013;<lpage>8</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ICCES51350.2021.9489160</pub-id></mixed-citation></ref>
<ref id="ref21"><label>21.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname><given-names>Y</given-names></name> <name><surname>Lee</surname><given-names>S</given-names></name> <name><surname>Han</surname><given-names>J</given-names></name> <name><surname>Seo</surname><given-names>YJ</given-names></name> <name><surname>Yang</surname><given-names>S</given-names></name></person-group>. <article-title>A nystagmus extraction system using artificial intelligence for video-nystagmography</article-title>. <source>Sci Rep</source>. (<year>2023</year>) <volume>13</volume>:<fpage>11975</fpage>. doi: <pub-id pub-id-type="doi">10.1038/s41598-023-39104-7</pub-id>, <pub-id pub-id-type="pmid">37488184</pub-id></mixed-citation></ref>
<ref id="ref22"><label>22.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Deng</surname><given-names>W</given-names></name> <name><surname>Huang</surname><given-names>J</given-names></name> <name><surname>Kong</surname><given-names>S</given-names></name> <name><surname>Zhan</surname><given-names>Y</given-names></name> <name><surname>Lv</surname><given-names>J</given-names></name> <name><surname>Cui</surname><given-names>Y</given-names></name></person-group>. <article-title>Pupil trajectory tracing from video-oculography with a new definition of pupil location</article-title>. <source>Biomed Signal Process Control</source>. (<year>2023</year>) <volume>79</volume>:<fpage>104196</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bspc.2022.104196</pub-id></mixed-citation></ref>
<ref id="ref23"><label>23.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mun</surname><given-names>SB</given-names></name> <name><surname>Kim</surname><given-names>YJ</given-names></name> <name><surname>Lee</surname><given-names>JH</given-names></name> <name><surname>Han</surname><given-names>GC</given-names></name> <name><surname>Cho</surname><given-names>SH</given-names></name> <name><surname>Jin</surname><given-names>S</given-names></name> <etal/></person-group>. <article-title>Deep learning-based nystagmus detection for BPPV diagnosis</article-title>. <source>Sensors</source>. (<year>2024</year>) <volume>24</volume>:<fpage>3417</fpage>. doi: <pub-id pub-id-type="doi">10.3390/s24113417</pub-id>, <pub-id pub-id-type="pmid">38894208</pub-id></mixed-citation></ref>
<ref id="ref24"><label>24.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Dogru</surname><given-names>HB</given-names></name> <name><surname>Kaplan</surname><given-names>A</given-names></name> <name><surname>Zengin</surname><given-names>AT</given-names></name> <name><surname>Ozkul</surname><given-names>T</given-names></name></person-group> <article-title>Image-based nystagmus analysis in BPPV disorders: polar coordinate system and template matching</article-title>. <conf-name>2024 IEEE 3rd International Conference on Computing and Machine Intelligence (ICMI)</conf-name>. <publisher-loc>Mt Pleasant, MI</publisher-loc>: <publisher-name>IEEE</publisher-name> (<year>2024</year>). p. <fpage>1</fpage>&#x2013;<lpage>5</lpage>.</mixed-citation></ref>
<ref id="ref25"><label>25.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Qiu</surname><given-names>X</given-names></name> <name><surname>Shi</surname><given-names>S</given-names></name> <name><surname>Tan</surname><given-names>X</given-names></name> <name><surname>Qu</surname><given-names>C</given-names></name> <name><surname>Fang</surname><given-names>Z</given-names></name> <name><surname>Wang</surname><given-names>H</given-names></name> <etal/></person-group>. <article-title>Gram-based attentive neural ordinary differential equations network for video nystagmography classification</article-title>. <conf-name>2023 IEEE/CVF International Conference on Computer Vision (ICCV)</conf-name>. <publisher-loc>Paris</publisher-loc>: <publisher-name>IEEE</publisher-name> (<year>2023</year>). p. <fpage>21282</fpage>&#x2013;<lpage>21291</lpage>.</mixed-citation></ref>
<ref id="ref26"><label>26.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Lee</surname><given-names>Y</given-names></name> <name><surname>Seo</surname><given-names>YJ</given-names></name> <name><surname>Yang</surname><given-names>S</given-names></name></person-group> <article-title>Evaluation of nystagmus and direction in videonystagmography using wavelet transform and deep learning</article-title>. <conf-name>2024 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)</conf-name>. <publisher-loc>Lisbon</publisher-loc>: <publisher-name>IEEE</publisher-name> (<year>2024</year>). p. <fpage>7077</fpage>&#x2013;<lpage>7079</lpage>.</mixed-citation></ref>
<ref id="ref27"><label>27.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname><given-names>P</given-names></name> <name><surname>Liu</surname><given-names>X</given-names></name> <name><surname>Dai</surname><given-names>Q</given-names></name> <name><surname>Yu</surname><given-names>J</given-names></name> <name><surname>Zhao</surname><given-names>J</given-names></name> <name><surname>Yu</surname><given-names>F</given-names></name> <etal/></person-group>. <article-title>Diagnosing the benign paroxysmal positional vertigo via 1D and deep-learning composite model</article-title>. <source>J Neurol</source>. (<year>2023</year>) <volume>270</volume>:<fpage>3800</fpage>&#x2013;<lpage>9</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00415-023-11662-w</pub-id>, <pub-id pub-id-type="pmid">37076600</pub-id></mixed-citation></ref>
<ref id="ref28"><label>28.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Nguyen</surname><given-names>T-A-Q</given-names></name> <name><surname>Hashmi</surname><given-names>E</given-names></name> <name><surname>Yamin</surname><given-names>MM</given-names></name> <name><surname>Beghdadi</surname><given-names>A</given-names></name> <name><surname>Cheikh</surname><given-names>FA</given-names></name> <name><surname>Ullah</surname><given-names>M</given-names></name></person-group>. "<article-title>Benign paroxysmal positional vertigo disorders classification using eye tracking data</article-title>" In: <person-group person-group-type="editor"><name><surname>Maglogiannis</surname><given-names>I</given-names></name> <name><surname>Iliadis</surname><given-names>L</given-names></name> <name><surname>Macintyre</surname><given-names>J</given-names></name> <name><surname>Avlonitis</surname><given-names>M</given-names></name> <name><surname>Papaleonidas</surname><given-names>A</given-names></name></person-group>, editors. <source>Artificial intelligence applications and innovations. IFIP advances in information and communication technology</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer Nature Switzerland</publisher-name> (<year>2024</year>). <fpage>174</fpage>&#x2013;<lpage>85</lpage>.</mixed-citation></ref>
<ref id="ref29"><label>29.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname><given-names>Z</given-names></name> <name><surname>Wang</surname><given-names>Y</given-names></name> <name><surname>Zhu</surname><given-names>M</given-names></name> <name><surname>Zhang</surname><given-names>J</given-names></name> <name><surname>He</surname><given-names>B</given-names></name></person-group>. <article-title>BPPV nystagmus signals diagnosis framework based on deep learning</article-title>. <source>Phys Eng Sci Med</source>. (<year>2025</year>) <volume>48</volume>:<fpage>769</fpage>&#x2013;<lpage>84</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s13246-025-01542-0</pub-id></mixed-citation></ref>
<ref id="ref30"><label>30.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname><given-names>H</given-names></name> <name><surname>Yang</surname><given-names>Z</given-names></name></person-group>. <article-title>Vertical nystagmus recognition based on deep learning</article-title>. <source>Sensors</source>. (<year>2023</year>) <volume>23</volume>:<fpage>1592</fpage>. doi: <pub-id pub-id-type="doi">10.3390/s23031592</pub-id>, <pub-id pub-id-type="pmid">36772631</pub-id></mixed-citation></ref>
<ref id="ref31"><label>31.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname><given-names>H</given-names></name> <name><surname>Yang</surname><given-names>Z</given-names></name></person-group>. <article-title>Torsional nystagmus recognition based on deep learning for vertigo diagnosis</article-title>. <source>Front Neurosci</source>. (<year>2023</year>) <volume>17</volume>:<fpage>1160904</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fnins.2023.1160904</pub-id>, <pub-id pub-id-type="pmid">37360163</pub-id></mixed-citation></ref>
<ref id="ref32"><label>32.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lim</surname><given-names>E-C</given-names></name> <name><surname>Park</surname><given-names>JH</given-names></name> <name><surname>Jeon</surname><given-names>HJ</given-names></name> <name><surname>Kim</surname><given-names>H-J</given-names></name> <name><surname>Lee</surname><given-names>H-J</given-names></name> <name><surname>Song</surname><given-names>C-G</given-names></name> <etal/></person-group>. <article-title>Developing a diagnostic decision support system for benign paroxysmal positional vertigo using a deep-learning model</article-title>. <source>J Clin Med</source>. (<year>2019</year>) <volume>8</volume>:<fpage>633</fpage>. doi: <pub-id pub-id-type="doi">10.3390/jcm8050633</pub-id>, <pub-id pub-id-type="pmid">31072056</pub-id></mixed-citation></ref>
<ref id="ref33"><label>33.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Alfarano</surname><given-names>A</given-names></name> <name><surname>Maiano</surname><given-names>L</given-names></name> <name><surname>Papa</surname><given-names>L</given-names></name> <name><surname>Amerini</surname><given-names>I</given-names></name></person-group>. <article-title>Estimating optical flow: a comprehensive review of the state of the art</article-title>. <source>Comput Vis Image Underst</source>. (<year>2024</year>) <volume>249</volume>:<fpage>104160</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.cviu.2024.104160</pub-id></mixed-citation></ref>
<ref id="ref34"><label>34.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kong</surname><given-names>S</given-names></name> <name><surname>Huang</surname><given-names>Z</given-names></name> <name><surname>Deng</surname><given-names>W</given-names></name> <name><surname>Zhan</surname><given-names>Y</given-names></name> <name><surname>Lv</surname><given-names>J</given-names></name> <name><surname>Cui</surname><given-names>Y</given-names></name></person-group>. <article-title>Nystagmus patterns classification framework based on deep learning and optical flow</article-title>. <source>Comput Biol Med</source>. (<year>2023</year>) <volume>153</volume>:<fpage>106473</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.106473</pub-id>, <pub-id pub-id-type="pmid">36621190</pub-id></mixed-citation></ref>
<ref id="ref35"><label>35.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname><given-names>W</given-names></name> <name><surname>Wu</surname><given-names>H</given-names></name> <name><surname>Liu</surname><given-names>Y</given-names></name> <name><surname>Zheng</surname><given-names>S</given-names></name> <name><surname>Liu</surname><given-names>Z</given-names></name> <name><surname>Li</surname><given-names>Y</given-names></name> <etal/></person-group>. <article-title>Deep learning based torsional nystagmus detection for dizziness and vertigo diagnosis</article-title>. <source>Biomed Signal Process Control</source>. (<year>2021</year>) <volume>68</volume>:<fpage>102616</fpage>. doi: <pub-id pub-id-type="doi">10.1016/j.bspc.2021.102616</pub-id></mixed-citation></ref>
<ref id="ref36"><label>36.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lu</surname><given-names>H</given-names></name> <name><surname>Mao</surname><given-names>Y</given-names></name> <name><surname>Li</surname><given-names>J</given-names></name> <name><surname>Zhu</surname><given-names>L</given-names></name></person-group>. <article-title>Multimodal deep learning-based diagnostic model for BPPV</article-title>. <source>BMC Med Inform Decis Mak</source>. (<year>2024</year>) <volume>24</volume>:<fpage>82</fpage>. doi: <pub-id pub-id-type="doi">10.1186/s12911-024-02438-x</pub-id>, <pub-id pub-id-type="pmid">38515156</pub-id></mixed-citation></ref>
<ref id="ref37"><label>37.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pham</surname><given-names>TX</given-names></name> <name><surname>Choi</surname><given-names>JW</given-names></name> <name><surname>Mina</surname><given-names>RJL</given-names></name> <name><surname>Nguyen</surname><given-names>TX</given-names></name> <name><surname>Madjid</surname><given-names>SR</given-names></name> <name><surname>Yoo</surname><given-names>CD</given-names></name></person-group>. <article-title>LAD: a hybrid deep learning system for benign paroxysmal positional vertigo disorders diagnostic</article-title>. <source>IEEE Access</source>. (<year>2022</year>) <volume>10</volume>:<fpage>113995</fpage>&#x2013;<lpage>4007</lpage>. doi: <pub-id pub-id-type="doi">10.1109/ACCESS.2022.3215625</pub-id></mixed-citation></ref>
<ref id="ref38"><label>38.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Khani</surname><given-names>M</given-names></name> <name><surname>Luo</surname><given-names>J</given-names></name> <name><surname>Assadi Shalmani</surname><given-names>M</given-names></name> <name><surname>Taleban</surname><given-names>A</given-names></name> <name><surname>Adams</surname><given-names>J</given-names></name> <name><surname>Friedland</surname><given-names>DR</given-names></name></person-group>. <article-title>Advancing personalized healthcare: leveraging explainable AI for BPPV risk assessment</article-title>. <source>Health Inf Sci Syst</source>. (<year>2024</year>) <volume>13</volume>:<fpage>1</fpage>. doi: <pub-id pub-id-type="doi">10.1007/s13755-024-00317-3</pub-id>, <pub-id pub-id-type="pmid">39606094</pub-id></mixed-citation></ref>
<ref id="ref39"><label>39.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kerber</surname><given-names>KA</given-names></name> <name><surname>Newman-Toker</surname><given-names>DE</given-names></name></person-group>. <article-title>Misdiagnosing the dizzy patient: common pitfalls in clinical practice</article-title>. <source>Neurol Clin</source>. (<year>2015</year>) <volume>33</volume>:<fpage>565</fpage>&#x2013;<lpage>75</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.ncl.2015.04.009</pub-id>, <pub-id pub-id-type="pmid">26231272</pub-id></mixed-citation></ref>
<ref id="ref40"><label>40.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Han</surname><given-names>J</given-names></name> <name><surname>Wang</surname><given-names>T</given-names></name> <name><surname>Du</surname><given-names>X</given-names></name> <name><surname>Wang</surname><given-names>Y</given-names></name> <name><surname>Guo</surname><given-names>Z</given-names></name> <name><surname>Li</surname><given-names>D</given-names></name> <etal/></person-group>. <article-title>Construction and clinical validation of benign paroxysmal positional vertigo intelligent auxiliary diagnosis model based on big data analysis</article-title>. <source>Front Neurol</source>. (<year>2025</year>) <volume>16</volume>:<fpage>1636696</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fneur.2025.1636696</pub-id>, <pub-id pub-id-type="pmid">40852518</pub-id></mixed-citation></ref>
<ref id="ref41"><label>41.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Soylemez</surname><given-names>E</given-names></name> <name><surname>Demir</surname><given-names>S</given-names></name> <name><surname>Ozacar</surname><given-names>K</given-names></name></person-group>. <article-title>Machine learning-based mobile application for predicting posterior canal benign paroxysmal positional vertigo</article-title>. <source>Laryngoscope Investig Otolaryngol</source>. (<year>2025</year>) <volume>10</volume>:<fpage>e70177</fpage>. doi: <pub-id pub-id-type="doi">10.1002/lio2.70177</pub-id>, <pub-id pub-id-type="pmid">40521132</pub-id></mixed-citation></ref>
<ref id="ref42"><label>42.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Swain</surname><given-names>SK</given-names></name></person-group>. <article-title>Revisiting pathophysiology of benign paroxysmal positional vertigo: a review</article-title>. <source>Int J Otorhinolaryngol Head Neck Surg</source>. (<year>2023</year>) <volume>9</volume>:<fpage>355</fpage>&#x2013;<lpage>60</lpage>. doi: <pub-id pub-id-type="doi">10.18203/issn.2454-5929.ijohns20230773</pub-id></mixed-citation></ref>
<ref id="ref43"><label>43.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Vaduva</surname><given-names>C</given-names></name> <name><surname>Est&#x00E9;ban-S&#x00E1;nchez</surname><given-names>J</given-names></name> <name><surname>Sanz-Fern&#x00E1;ndez</surname><given-names>R</given-names></name> <name><surname>Mart&#x00ED;n-Sanz</surname><given-names>E</given-names></name></person-group>. <article-title>Prevalence and management of post-BPPV residual symptoms</article-title>. <source>Eur Arch Otorhinolaryngol</source>. (<year>2018</year>) <volume>275</volume>:<fpage>1429</fpage>&#x2013;<lpage>37</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s00405-018-4980-x</pub-id>, <pub-id pub-id-type="pmid">29687182</pub-id></mixed-citation></ref>
<ref id="ref44"><label>44.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Baydan-Aran</surname><given-names>M</given-names></name> <name><surname>Binay-Bolat</surname><given-names>K</given-names></name> <name><surname>S&#x00F6;ylemez</surname><given-names>E</given-names></name> <name><surname>Aran</surname><given-names>OT</given-names></name></person-group>. <article-title>Predictive modeling of maneuver numbers in BPPV therapy using machine learning</article-title>. <source>J Vestib Res</source>. (<year>2025</year>):<fpage>09574271251351905</fpage>. doi: <pub-id pub-id-type="doi">10.1177/09574271251351905</pub-id>, <pub-id pub-id-type="pmid">40512135</pub-id></mixed-citation></ref>
<ref id="ref45"><label>45.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Oth&#x00E9;guy</surname><given-names>M</given-names></name> <name><surname>Nourrit</surname><given-names>V</given-names></name> <name><surname>Bougrenet De La Tocnaye</surname><given-names>J-L</given-names></name></person-group>. <article-title>Instrumented contact lens to detect gaze movements independently of eye blinks</article-title>. <source>Transl Vis Sci Technol</source>. (<year>2024</year>) <volume>13</volume>:<fpage>12</fpage>. doi: <pub-id pub-id-type="doi">10.1167/tvst.13.11.12</pub-id>, <pub-id pub-id-type="pmid">39535746</pub-id></mixed-citation></ref>
<ref id="ref46"><label>46.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hassija</surname><given-names>V</given-names></name> <name><surname>Chamola</surname><given-names>V</given-names></name> <name><surname>Mahapatra</surname><given-names>A</given-names></name> <name><surname>Singal</surname><given-names>A</given-names></name> <name><surname>Goel</surname><given-names>D</given-names></name> <name><surname>Huang</surname><given-names>K</given-names></name> <etal/></person-group>. <article-title>Interpreting black-box models: a review on explainable artificial intelligence</article-title>. <source>Cogn Comput</source>. (<year>2024</year>) <volume>16</volume>:<fpage>45</fpage>&#x2013;<lpage>74</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s12559-023-10179-8</pub-id></mixed-citation></ref>
<ref id="ref47"><label>47.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Birch</surname><given-names>J</given-names></name> <name><surname>Creel</surname><given-names>KA</given-names></name> <name><surname>Jha</surname><given-names>AK</given-names></name> <name><surname>Plutynski</surname><given-names>A</given-names></name></person-group>. <article-title>Clinical decisions using AI must consider patient values</article-title>. <source>Nat Med</source>. (<year>2022</year>) <volume>28</volume>:<fpage>229</fpage>&#x2013;<lpage>32</lpage>. doi: <pub-id pub-id-type="doi">10.1038/s41591-021-01624-y</pub-id>, <pub-id pub-id-type="pmid">35102337</pub-id></mixed-citation></ref>
</ref-list>
<fn-group>
<fn fn-type="custom" custom-type="edited-by" id="fn0001">
<p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1740087/overview">Piergiorgio Lochner</ext-link>, Saarland University Hospital, Germany</p>
</fn>
<fn fn-type="custom" custom-type="reviewed-by" id="fn0002">
<p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3185036/overview">Juanli Xing</ext-link>, Xi'an Jiaotong University, China</p>
</fn>
</fn-group>
</back>
</article>