<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article article-type="review-article" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Oral Health</journal-id><journal-title-group>
<journal-title>Frontiers in Oral Health</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Oral Health</abbrev-journal-title></journal-title-group>
<issn pub-type="epub">2673-4842</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/froh.2026.1760177</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Mini Review</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Prospective applications of artificial intelligence for the diagnosis of oral leukoplakia: a scoping review</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author"><name><surname>Jim&#x00E9;nez</surname><given-names>Constanza</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/2811754/overview" /><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role></contrib>
<contrib contrib-type="author"><name><surname>Ledesma</surname><given-names>Carolina</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role></contrib>
<contrib contrib-type="author"><name><surname>Naranjo</surname><given-names>Tamara</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role></contrib>
<contrib contrib-type="author"><name><surname>Fern&#x00E1;ndez</surname><given-names>Alejandra</given-names></name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/1194209/overview"/><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role></contrib>
<contrib contrib-type="author" corresp="yes"><name><surname>Mart&#x00ED;nez-Flores</surname><given-names>Ren&#x00E9;</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="cor1">&#x002A;</xref><uri xlink:href="https://loop.frontiersin.org/people/1026586/overview" /><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role></contrib>
<contrib contrib-type="author" corresp="yes"><name><surname>Niklander</surname><given-names>Sven Eric</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="cor1">&#x002A;</xref><uri xlink:href="https://loop.frontiersin.org/people/882735/overview" /><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role></contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Unit of Oral Pathology and Medicine, Faculty of Dentistry, Universidad Andres Bello</institution>, <city>Vi&#x00F1;a del Mar</city>, <country country="CL">Chile</country></aff>
<aff id="aff2"><label>2</label><institution>Dermoral Research Group, Laboratory of Translational Dentistry, Faculty of Dentistry, Universidad Andres Bello</institution>, <city>Santiago</city>, <country country="CL">Chile</country></aff>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label><bold>Correspondence:</bold> Sven Eric Niklander <email xlink:href="mailto:sven.niklander@unab.cl">sven.niklander@unab.cl</email> Ren&#x00E9; Mart&#x00ED;nez-Flores <email xlink:href="mailto:rene.martinez@unab.cl">rene.martinez@unab.cl</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-16"><day>16</day><month>02</month><year>2026</year></pub-date>
<pub-date publication-format="electronic" date-type="collection"><year>2026</year></pub-date>
<volume>7</volume><elocation-id>1760177</elocation-id>
<history>
<date date-type="received"><day>03</day><month>12</month><year>2025</year></date>
<date date-type="rev-recd"><day>16</day><month>01</month><year>2026</year></date>
<date date-type="accepted"><day>19</day><month>01</month><year>2026</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2026 Jim&#x00E9;nez, Ledesma, Naranjo, Fern&#x00E1;ndez, Mart&#x00ED;nez-Flores and Niklander.</copyright-statement>
<copyright-year>2026</copyright-year><copyright-holder>Jim&#x00E9;nez, Ledesma, Naranjo, Fern&#x00E1;ndez, Mart&#x00ED;nez-Flores and Niklander</copyright-holder><license><ali:license_ref start_date="2026-02-16">https://creativecommons.org/licenses/by/4.0/</ali:license_ref><license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p></license>
</permissions>
<abstract><sec><title>Introduction</title>
<p>Oral leukoplakia (OL) is the most prevalent oral potentially malignant disorder worldwide. Its diagnosis is clinical and based on excluding all other white patches of the oral cavity, which can be challenging and time-consuming. In recent years, artificial intelligence (AI) has emerged as a promising tool to overcome these limitations, yet a comprehensive overview of the existing evidence is still lacking.</p>
</sec><sec><title>Objective</title>
<p>This scoping review surveys the current landscape of artificial intelligence applications for diagnosing oral leukoplakia, both clinically and histopathologically.</p>
</sec><sec><title>Materials and methods</title>
<p>A comprehensive search was conducted in PubMed, Scopus, Web of Science, and OVID for studies on the use of artificial intelligence for the diagnosis of oral leukoplakia. No date/language restrictions were applied. Two reviewers screened articles and extracted data into predefined tables.</p>
</sec><sec><title>Results</title>
<p>Ten studies were included. Early research used spectroscopy-based models, while recent work employed deep learning for clinical and histopathological image analysis. Most models achieved moderate-to-high diagnostic performance, with sensitivity, specificity and accuracy values above 80&#x0025;. Overall, models allowed differentiating oral leukoplakia from normal oral mucosa, oral squamous cell carcinoma, and proliferative verrucous leukoplakia, with stronger performance in advanced lesions. Furthermore, artificial intelligence showed promise in grading oral epithelial dysplasia severity in histological samples, occasionally outperforming oral pathologists.</p>
</sec><sec><title>Conclusions</title>
<p>While current evidence remains preliminary, artificial intelligence shows promise as an adjunct tool for oral leukoplakia diagnosis. However, standardized reporting, inclusion of lesions within datasets, and multicenter validation in large and diverse cohorts are still needed to ensure generalizability and further clinical validation.</p>
</sec>
</abstract>
<kwd-group>
<kwd>artificial intelligence</kwd>
<kwd>diagnosis</kwd>
<kwd>deep learning</kwd>
<kwd>leukoplakia</kwd>
<kwd>oral medicine</kwd>
<kwd>machine learning</kwd>
<kwd>mouth neoplasms</kwd>
</kwd-group><funding-group><funding-statement>The author(s) declared that financial support was received for this work and/or its publication. This work was supported by the Agencia Nacional de Investigaci&#x00F3;n y Desarrollo (ANID) through the FONDECYT Iniciaci&#x00F3;n grant No. 11250099, and by Fondo N&#x00FA;cleo No. DI-03-25/NUC, Universidad Andr&#x00E9;s Bello.</funding-statement></funding-group><counts>
<fig-count count="1"/>
<table-count count="1"/><equation-count count="0"/><ref-count count="38"/><page-count count="10"/><word-count count="0"/></counts><custom-meta-group><custom-meta><meta-name>section-at-acceptance</meta-name><meta-value>Oral Cancers</meta-value></custom-meta></custom-meta-group>
</article-meta>
</front>
<body><sec id="s1" sec-type="intro"><title>Introduction</title>
<p>Oral leukoplakia (OL) is the most common potentially malignant disorder of the oral cavity, demanding reliable diagnosis and risk stratification (<xref ref-type="bibr" rid="B1">1</xref>). It usually presents as a white plaque that cannot be scraped off and cannot be attributed to any other definable disease (<xref ref-type="bibr" rid="B1">1</xref>). OL affects approximately 2.6&#x0025; of the population (<xref ref-type="bibr" rid="B2">2</xref>), predominantly men over 50 years, with a malignant transformation rate estimated at 9.5&#x0025; (<xref ref-type="bibr" rid="B3">3</xref>).</p>
<p>Clinically, OL is classified as homogeneous or non-homogeneous. Homogeneous lesions appear as uniformly white plaques, with smooth, regular surfaces and sharp margins. In contrast, non-homogeneous OLs display mixed white/red patterns, and are further divided into speckled (alternating white and red areas), nodular (with small, polypoid exophytic nodules), or verrucous (characterized by a wrinkled or corrugated surface) (<xref ref-type="bibr" rid="B4">4</xref>). Regardless of the subtype, lesion size may range from well-defined millimeter-sized plaques to extensive patches covering broad areas of the oral cavity (<xref ref-type="bibr" rid="B4">4</xref>). Non-homogenous lesions are associated with higher degrees of dysplasia and with a higher risk of transforming into an oral squamous cell carcinoma (OSCC) (<xref ref-type="bibr" rid="B5">5</xref>, <xref ref-type="bibr" rid="B6">6</xref>).</p>
<p>The diagnosis of OL is clinical, and the lesion often mimics other white or mixed white and red lesions (such as candidiasis, frictional keratosis, or lichen planus). Therefore, its clinical diagnosis can be challenging, especially for general dentists or non-oral medicine practitioners (<xref ref-type="bibr" rid="B7">7</xref>). A comprehensive evaluation must start by excluding any traumatic or reactive causes, followed by an attempt to gently scrape the lesion to ensure it cannot be removed. If the lesion persists, the adjacent mucosa should be stretched to rule out leukoedema. Once these and other differential diagnoses are excluded, a biopsy must be performed, as histopathological examination is mandatory to provide definitive diagnosis and assess the presence and severity of epithelial dysplasia (mild, moderate or severe) (<xref ref-type="bibr" rid="B4">4</xref>).</p>
<p>Even with this systematic diagnostic workflow, the histopathological evaluation of OL has limitations. For example, incisional biopsies of large or multifocal lesions can miss critical dysplastic areas. Histologic similarities with other entities, such as lichen planus, can generate diagnostic doubts (<xref ref-type="bibr" rid="B8">8</xref>). Dysplasia grading also remains highly subjective, showing significant inter- and intra-observer variability. With no definitive molecular or immunohistochemical markers to confirm the diagnosis of OL, there is a pressing demand for novel, precise and objective diagnostic approaches (<xref ref-type="bibr" rid="B8">8</xref>). In this context, Artificial Intelligence (AI)-driven analysis and decision-support tools hold promise for improving consistency, accuracy, and lesion detection (<xref ref-type="bibr" rid="B9">9</xref>).</p>
<p>AI encompasses the theory and creation of computer systems capable of performing tasks with human-like intelligence, including visual perception, speech recognition, decision-making, and language translation (<xref ref-type="bibr" rid="B10">10</xref>). Machine Learning (ML) is a subfield of AI that focuses on algorithms that improve their performance through exposure to data. In supervised learning, for example, ML models detect patterns in labeled datasets and apply them to make predictions on new, unseen inputs without explicit programming (<xref ref-type="bibr" rid="B11">11</xref>). Deep Learning (DL), on the other hand, extends ML by using convolutional neural networks (CNN) inspired by the human brain (<xref ref-type="bibr" rid="B12">12</xref>). These systems of interconnected nodes or neurons are organized into an input layer, one or more hidden layers, and a final output layer. During forward propagation, data passes through these layers to produce an output or prediction, whereas during backpropagation, the neural network is able to adjust its internal weights based on the error between its prediction and the true outcome. This iterative process of propagation and weight adjustment allows the network to self-correct and continually enhance its performance over time (<xref ref-type="bibr" rid="B13">13</xref>).</p>
<p>In recent years, numerous studies have investigated the application of ML and DL technologies to support clinicians in diagnosing OL (<xref ref-type="bibr" rid="B14">14</xref>); however, a comprehensive synthesis of this evidence is still needed. Therefore, this scoping review aims to systematically synthesize current evidence on the use of artificial intelligence technologies, particularly classification algorithms, for the diagnosis of OL and for the assessment of epithelial dysplasia severity in clinically diagnosed OL lesions.</p>
</sec>
<sec id="s2" sec-type="methods"><title>Materials and methods</title>
<sec id="s2a"><title>Protocol, registration and reporting</title>
<p>This scoping review was approved by the Ethics and Scientific Committee of the Faculty of Dentistry at Universidad Andres Bello, Vi&#x00F1;a del Mar, Chile (Acta de Aprobaci&#x00F3;n 08-2024 &#x0023;PROPRGFO_2024_66). The methodology followed the &#x201C;Scoping Reviews&#x201D; chapter of the JBI Manual for Evidence Synthesis (<xref ref-type="bibr" rid="B15">15</xref>). The protocol is publicly available on the Open Science Framework (<ext-link ext-link-type="uri" xlink:href="https://osf.io/qgt86/overview">https://osf.io/qgt86/overview</ext-link>), and the final manuscript was prepared in accordance with the PRISMA-ScR checklist (<ext-link ext-link-type="uri" xlink:href="https://www.prisma-statement.org/scoping">https://www.prisma-statement.org/scoping</ext-link>).</p>
</sec>
<sec id="s2b"><title>Research question</title>
<p>This review addressed the question: &#x201C;How effective are AI-based methods in classifying OL for both clinical and histopathological diagnosis, as well as in assessing and grading the severity of epithelial dysplasia in adult patients across diverse clinical settings?&#x201D;</p>
</sec>
<sec id="s2c"><title>Eligibility criteria</title>
<p>Eligibility was defined using the Population/Concept/Context (PCC) framework: (i) Population: studies involving adult patients diagnosed with OL; (ii) Concept: studies evaluating the diagnostic performance of preliminary ML and/or DL models for OL diagnosis; and (iii) Context: studies conducted in any clinical setting.</p>
<p>Only studies published in the last 25 years in English or Spanish were included. Exclusion criteria were non-primary research (e.g., editorials, letters to the editor, reviews, etc.), studies not addressing the use of ML and/or DL for OL diagnosis, articles lacking clearly defined methods, and unavailability of full-text access.</p>
</sec>
<sec id="s2d"><title>Information sources and electronic literature search</title>
<p>Systematic literature searches were conducted in PubMed, SCOPUS, Web of Science, and OVID between March-May 2024, and updated in July 2025. Medical Subject Headings (MeSH) terms were selected based on a pilot search using relevant keywords. The final search queries were:
<list list-type="simple">
<list-item>
<p>SCOPUS: &#x201C; (&#x201C;Leukoplakia, oral&#x201D; OR leukoplakia) AND &#x201C;artificial Intelligence&#x201D; AND PUBYEAR &#x003E; 2,000 AND PUBYEAR &#x003C; 2025 AND [LIMIT-TO (DOCTYPE, &#x201C;ar&#x201D;)] AND [LIMIT-TO (LANGUAGE, &#x201C;English&#x201D;) OR LIMIT-TO (LANGUAGE, &#x201C;Spanish&#x201D;)] AND [LIMIT-TO (SRCTYPE, &#x201C;j&#x201D;)]&#x201D;.</p></list-item>
<list-item>
<p>PubMED: (Leukoplakia, oral OR leukoplakia) AND artificial Intelligence.</p></list-item>
<list-item>
<p>Web of Science: ([ALL&#x003D;(Leukoplakia, oral)] OR ALL&#x003D;(leukoplakia)) AND ALL&#x003D;(artificial Intelligence)</p></list-item>
<list-item>
<p>OVID: [(Leukoplakia, oral or leukoplakia) and artificial Intelligence].af.</p></list-item>
</list></p>
</sec>
<sec id="s2e"><title>Study selection</title>
<p>All records were imported into Rayyan (<ext-link ext-link-type="uri" xlink:href="https://www.rayyan.ai/">https://www.rayyan.ai/</ext-link>) for reference management. Two reviewers (C.L and T.N) independently screened titles, abstracts, and full texts. Discrepancies were resolved through consultation with C.J and S.N.</p>
</sec>
<sec id="s2f"><title>Data extraction and synthesis</title>
<p>Data were extracted using pre-defined Excel templates by reviewers C.L and T.N. Results were synthesized narratively and presented using tables and figures. Critical appraisal of included studies was not performed, as it is not mandatory for scoping reviews (<xref ref-type="bibr" rid="B15">15</xref>).</p>
</sec>
</sec>
<sec id="s3" sec-type="results"><title>Results</title>
<p>The electronic search yielded 233 records. After removal of duplicates (<italic>n</italic>&#x2009;&#x003D;&#x2009;67), 166 studies were screened based on the predefined eligibility criteria. Of these, 142 were excluded, leaving 24 articles for retrieval and full-text review. Eighteen articles were excluded at this stage for the following reasons: not meeting the &#x201C;concept&#x201D; criterion (<italic>n</italic>&#x2009;&#x003D;&#x2009;16), being a narrative review (<italic>n</italic>&#x2009;&#x003D;&#x2009;1), or having been retracted (<italic>n</italic>&#x2009;&#x003D;&#x2009;1). Initially, 6 primary publications were included in this review. However, 4 additional articles were identified during the search update in July 2025, bringing the final total to ten publications. The study selection process is illustrated in the PRISMA-ScR flow diagram (<xref ref-type="fig" rid="F1">Figure&#x00A0;1</xref>).</p>
<fig id="F1" position="float"><label>Figure&#x00A0;1</label>
<caption><p>Prisma flow diagram.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="froh-07-1760177-g001.tif"><alt-text content-type="machine-generated">Flowchart detailing the identification and screening process of studies. Updated studies: Records from PubMed, SCOPUS, and registers total six included studies. Identification of studies via databases: Records from PubMed, SCOPUS, Web of Science, OVID, and registers total 269, with 67 removed as duplicates. Screening: 166 records screened, 142 excluded. Eligibility: 24 reports assessed, 18 excluded for various reasons. Review: Six studies included, categorized by spectroscopy-based (four), clinical image-based (four), and histopathological image-based AI models (two), totaling ten studies.</alt-text>
</graphic>
</fig>
<sec id="s3a"><title>Overview of included studies</title>
<p>The current body of literature on AI-based approaches to diagnose OL reflects a dynamic and heterogeneous methodological landscape (<xref ref-type="table" rid="T1">Table&#x00A0;1</xref>). The earliest studies were published in 2000 (<xref ref-type="bibr" rid="B16">16</xref>) and 2015 (<xref ref-type="bibr" rid="B17">17</xref>), with all subsequent publications published more recently, between 2020 and 2025 (<xref ref-type="bibr" rid="B16">16</xref>&#x2013;<xref ref-type="bibr" rid="B25">25</xref>). Main objectives in the field were: (a) OL classification and differentiation from normal oral mucosa (NOM) (<xref ref-type="bibr" rid="B16">16</xref>, <xref ref-type="bibr" rid="B23">23</xref>, <xref ref-type="bibr" rid="B25">25</xref>), (b) OL classification and differentiation from OSCC (<xref ref-type="bibr" rid="B17">17</xref>&#x2013;<xref ref-type="bibr" rid="B19">19</xref>, <xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B23">23</xref>), (c) OL classification and differentiation from other white lesions of the oral cavity (<xref ref-type="bibr" rid="B21">21</xref>, <xref ref-type="bibr" rid="B23">23</xref>, <xref ref-type="bibr" rid="B24">24</xref>), and (d) to assess the severity of OL, particularly through grading of epithelial dysplasia in histological samples (<xref ref-type="bibr" rid="B20">20</xref>).</p>
<table-wrap id="T1" position="float"><label>Table&#x00A0;1</label>
<caption><p>AI-based diagnostic models for oral leukoplakia.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Ref.</th>
<th valign="top" align="center">Author (year)</th>
<th valign="top" align="center">Study design</th>
<th valign="top" align="center">Objective</th>
<th valign="top" align="center">Dataset</th>
<th valign="top" align="center">AI type/model</th>
<th valign="top" align="center">Task</th>
<th valign="top" align="center">Main comparison</th>
<th valign="top" align="center">Key findings</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B16">16</xref>)</td>
<td valign="top" align="left">Van Staveren et al. (2000)</td>
<td valign="top" align="left">Diagnostic model development study (experimental)</td>
<td valign="top" align="left">To evaluate the performance of an artificial neural network as an alternative classification technique of autofluorescence spectra of OL, which may reflect the grade of tissue dysplasia.</td>
<td valign="top" align="left">Autofluorescence spectroscopy measurements</td>
<td valign="top" align="left">ML/ANN</td>
<td valign="top" align="left">Classification</td>
<td valign="top" align="left">OL vs. NOM</td>
<td valign="top" align="left">The model distinguished abnormal from NOM with 86&#x0025; sensitivity and 100&#x0025; specificity and reliably separated homogeneous from non-homogeneous OL.<break/>Spectral features showed little to no association with lesion morphology (verrucous vs. erosive) or histologic grade (dysplasia, hyperplasia, hyperkeratosis).</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B17">17</xref>)</td>
<td valign="top" align="left">Banerjee et al. (2015)</td>
<td valign="top" align="left">Diagnostic model development study (experimental)</td>
<td valign="top" align="left">Identification of specific label-free biomarkers for differentiation of OL and OSCC.</td>
<td valign="top" align="left">FTIR spectroscopy measurements</td>
<td valign="top" align="left">ML/SVM</td>
<td valign="top" align="left">Classification</td>
<td valign="top" align="left">OL vs. OSCC</td>
<td valign="top" align="left">Six spectral features successfully classified OL and OSCC with high sensitivity and specificity.<break/>Altered glycogen and keratin content in histological samples could be used to discriminate OL and OSCC.</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B25">25</xref>)</td>
<td valign="top" align="left">Jurczyszyn et al. (2020)</td>
<td valign="top" align="left">Diagnostic model development study (retrospective)</td>
<td valign="top" align="left">To propose an effective texture analysis algorithm for OL diagnosis.</td>
<td valign="top" align="left">Clinical images</td>
<td valign="top" align="left">ML/PNN</td>
<td valign="top" align="left">Classification</td>
<td valign="top" align="left">OL vs. NOM</td>
<td valign="top" align="left">Differentiation of OL from NOM was highly successful (<italic>p</italic>&#x2009;&#x003C;&#x2009;0.05) with the model showing full OL recognition (sensitivity 100&#x0025;) and specificity 97&#x0025;.</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B18">18</xref>)</td>
<td valign="top" align="left">Ghosh et al. (2022)</td>
<td valign="top" align="left">Diagnostic model development study (experimental)</td>
<td valign="top" align="left">To develop and evaluate a deep reinforced neural network model to classify the epigenetic changes identified from the Raman and FTIR spectra.</td>
<td valign="top" align="left">FTIR and Raman spectroscopy measurements</td>
<td valign="top" align="left">DL/DRNN</td>
<td valign="top" align="left">Classification</td>
<td valign="top" align="left">OL vs. OSCC</td>
<td valign="top" align="left">The model achieved an overall accuracy of 83.33&#x0025; and an ROC of 0.88.<break/>Class-specific accuracies for NOM, OL and OSCC were: 83.3&#x0025;, 87&#x0025; and 95.24&#x0025;, respectively.</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B20">20</xref>)</td>
<td valign="top" align="left">Peng et al. (2024)</td>
<td valign="top" align="left">Diagnostic and grading model development (retrospective)</td>
<td valign="top" align="left">To establish an objective, accurate and useful detection and grading system for oral epithelial dysplasia in the whole slides of OL</td>
<td valign="top" align="left">Histopathological images and microarray data</td>
<td valign="top" align="left">DL/CNN (E-MOD and E-MOD-plus)</td>
<td valign="top" align="left">Detection&#x2009;&#x002B;&#x2009;Classification</td>
<td valign="top" align="left">None<break/>(focused on OL dysplasia grading)</td>
<td valign="top" align="left">E-MOD-plus demonstrated strong internal performance for detecting and grading oral epithelial dysplasia in OL, achieving 81.3&#x0025; accuracy (95&#x0025; CI: 71.4&#x2013;90.5&#x0025;) with an AUC of 0.793 (95&#x0025; CI: 0.650 to 0.925).<break/>When validated externally on microarray images, accuracy rose to 86.5&#x0025; (95&#x0025; CI: 82.4&#x2013;90.0&#x0025;) while AUC dipped to 0.669 (95&#x0025; CI: 0.496 to 0.843).<break/>The model outperformed 3 experienced oral pathologists.</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B21">21</xref>)</td>
<td valign="top" align="left">Ramesh et al. (2025)</td>
<td valign="top" align="left">Diagnostic model development study (retrospective)</td>
<td valign="top" align="left">To employ and compare the CNNs Xception and MobileNet-v2 for the diagnosis of OL and to differentiate its clinical types from other white lesions of the oral cavity.</td>
<td valign="top" align="left">Clinical images</td>
<td valign="top" align="left">DL/CNN (MobileNetV2 and Xception)</td>
<td valign="top" align="left">Classification</td>
<td valign="top" align="left">OL vs. other white lesions</td>
<td valign="top" align="left">Both models were able to diagnose OL and other white lesions using photographs.<break/>In terms of F1-score and overall accuracy, the MobilenetV2 model performed noticeably better than Xception (accuracies: 92&#x0025; and 89&#x0025;, respectively).</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B22">22</xref>)</td>
<td valign="top" align="left">Schmidl et al. (2025)</td>
<td valign="top" align="left">Diagnostic model testing (cross-sectional)</td>
<td valign="top" align="left">To evaluate the application of image recognition by ChatGPT to diagnose OSCC and OL based on clinical images, with images without any lesion as a control group.</td>
<td valign="top" align="left">Clinical images with or without clinical history.</td>
<td valign="top" align="left">DL/LLM with vision (ChatGPT 4.0)</td>
<td valign="top" align="left">Classification</td>
<td valign="top" align="left">OL vs. OSCC</td>
<td valign="top" align="left">ChatGPT 4.0 demonstrated the ability to correctly identify OL cases using image recognition alone (sensitivity of 72.2&#x0025;, specificity of 92.6&#x0025;, and accuracy of 84.4&#x0025;), while the ability to diagnose OSCC was insufficient (sensitivity of 18.2&#x0025;, specificity of 52.2&#x0025;, and accuracy of 35.6&#x0025;).<break/>However, the diagnostic performance improved by including the clinical history in the prompt (OL performance: sensitivity of 93.3&#x0025;, specificity of 96,7&#x0025;, and accuracy of 95,6&#x0025;; OSCC performance: sensitivity of 100&#x0025;, specificity of 88.2&#x0025;, and accuracy of 91.1&#x0025;).<break/>Finally, relying only in clinical history resulted in a misclassification of most OL and some OSCC cases.</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B19">19</xref>)</td>
<td valign="top" align="left">Muniz de Lima et al. (2023)</td>
<td valign="top" align="left">Diagnostic model development study (retrospective)</td>
<td valign="top" align="left">To evaluate the importance of complementary data to histopathological image analysis of OL and OSCC for computer-aided diagnosis.</td>
<td valign="top" align="left">Histopathological images and data</td>
<td valign="top" align="left">DL/ResNetV2&#x2009;&#x002B;&#x2009;Metablock</td>
<td valign="top" align="left">Multiclass Classification</td>
<td valign="top" align="left">OL without dysplasia vs. OL with dysplasia vs. OSCC</td>
<td valign="top" align="left">The highest balanced accuracy for binary classification (OL vs. OSCC) was 95.32&#x0025;.<break/>For multiclass classification (OL without dysplasia vs. OL with dysplasia vs. OSCC), the highest balanced accuracy was 83.24&#x0025;.<break/>The combined use of complementary data and histopathological images achieved a 30.68&#x0025; gain in performance compared to image-only approaches in multiclass classification tasks.</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B23">23</xref>)</td>
<td valign="top" align="left">Zhang et al. (2025)</td>
<td valign="top" align="left">Diagnostic model development study (cross-sectional)</td>
<td valign="top" align="left">To evaluate whether a machine learning model can accurately identify oral mucosal diseases&#x2014;including OL&#x2014;based on sub-diffuse reflectance spectroscopy measurements.</td>
<td valign="top" align="left">Sub-diffuse reflectance spectroscopy measurements.</td>
<td valign="top" align="left">ML&#x2009;&#x002B;&#x2009;DL/SVM&#x2009;&#x002B;<sans-serif>&#x2009;PNN</sans-serif></td>
<td valign="top" align="left">Classification</td>
<td valign="top" align="left">OL vs. NOM vs. OSCC vs. OLP</td>
<td valign="top" align="left">Both SVM and PNN yielded comparable accuracy in distinguishing healthy from diseased spectra.<break/>Even using only optical parameter features, the models differentiated among OLP, OL, OSCC, and normal mucosa, with high classification metrics, achieving at least 0.8289 accuracy, 0.8495 sensitivity, 0.9311 specificity, and a Matthews correlation coefficient of 0.8085.</td>
</tr>
<tr>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B24">24</xref>)</td>
<td valign="top" align="left">Schwarzler et al. (2025)</td>
<td valign="top" align="left">Diagnostic model development and validation study (retrospective)</td>
<td valign="top" align="left">To evaluate whether a deep learning model trained to discriminate 11 classes of oral mucosal lesions could exceed the performance of general dentists.</td>
<td valign="top" align="left">Clinical images</td>
<td valign="top" align="left">DL/YOLOv8</td>
<td valign="top" align="left">Detection&#x2009;&#x002B;<sans-serif>&#x2009;Classification</sans-serif></td>
<td valign="top" align="left">OL vs. PVL, OLP, Keratosis</td>
<td valign="top" align="left">For OL, the model achieved a sensitivity of 0.59 and a specificity of 0.94. The F1-score was 0.54 with a precision of 0.5, and an AUCROC 0.86.<break/>For PVL, the model achieved a sensitivity of 0.89 and a specificity of 0.97. The F1-score was 0.64 with a precision of 0.5, and an AUCROC 0.91.<break/>Keratosis was most likely confused with OL or &#x201C;white&#x201D; OLP. In terms of object detection, some of the best results were observed for PVL whereas the lowest values were noted for OL.<break/>The overall performance of the final model was comparable to that of oral surgeons (<italic>p</italic>&#x2009;&#x003D;&#x2009;0.93); however, the model outperformed general dentists (<italic>p</italic>&#x2009;&#x003C;&#x2009;0.01).</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn id="TF1"><p>OL, oral leukoplakia; NOM, normal oral mucosa; OSCC, oral squamous cell carcinoma; PVL, proliferative verrucous leukoplakia; OLP, oral lichen planus; AI, artificial intelligence; FTIR, Fourier-transform infrared; AUC, area under the curve; ROC, receiver operating characteristic; DL, deep learning; ML, machine learning; ANN, artificial neural network; SVM, support vector machine; PNN, probabilistic neural network; DRNN, deep reinforced neural network; CNN, convolutional neural network.</p></fn>
</table-wrap-foot>
</table-wrap>
<p>A variety of dataset types were utilized, including spectroscopy-based profiles (<xref ref-type="bibr" rid="B16">16</xref>&#x2013;<xref ref-type="bibr" rid="B18">18</xref>, <xref ref-type="bibr" rid="B23">23</xref>), clinical and histopathological images (<xref ref-type="bibr" rid="B21">21</xref>, <xref ref-type="bibr" rid="B24">24</xref>, <xref ref-type="bibr" rid="B25">25</xref>), as well as multimodal approaches that integrated clinical or histopathological images with other data, such as electronic health records and/or genomic information (<xref ref-type="bibr" rid="B19">19</xref>, <xref ref-type="bibr" rid="B20">20</xref>, <xref ref-type="bibr" rid="B22">22</xref>). Research was conducted across multiple countries: India led with 3 publications (<xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B18">18</xref>, <xref ref-type="bibr" rid="B21">21</xref>), followed by China with two articles (<xref ref-type="bibr" rid="B20">20</xref>, <xref ref-type="bibr" rid="B23">23</xref>) and Germany (<xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B24">24</xref>). Single studies originated from Brazil (<xref ref-type="bibr" rid="B19">19</xref>), Poland (<xref ref-type="bibr" rid="B25">25</xref>), and the Netherlands (<xref ref-type="bibr" rid="B16">16</xref>). All articles were published in English, across a diverse array of journals, reflecting the interdisciplinary interest in applying AI models to the diagnosis of OL (<xref ref-type="table" rid="T1">Table&#x00A0;1</xref>).</p>
</sec>
<sec id="s3b"><title>Spectroscopy-based AI models</title>
<p>Early research into AI-based tools for non-invasive diagnosis of OL began in 2000 with the work of Van Staveren et al. (<xref ref-type="bibr" rid="B16">16</xref>). The research team explored the use of artificial neural networks to differentiate OL from NOM using tissue autofluorescence measures obtained through spectroscopy. Their model demonstrated promising diagnostic performance, achieving 86&#x0025; sensitivity and 100&#x0025; specificity for the task (<xref ref-type="bibr" rid="B16">16</xref>). However, it failed to identify specific spectral patterns that consistently reflected on OL morphology or the histopathological grade of dysplasia within lesions (<xref ref-type="bibr" rid="B16">16</xref>).</p>
<p>More than a decade later, Banerjee et al. (<xref ref-type="bibr" rid="B17">17</xref>) and Ghosh et al. (<xref ref-type="bibr" rid="B18">18</xref>) investigated the use of Fourier-transform infrared (FTIR) spectroscopy and combined FTIR/Raman spectroscopy, respectively, to distinguish OL lesions from NOM and OSCC using AI-based algorithms. In Banerjee&#x0027;s study, the best-performing support vector machine (SVM) model differentiated OL from NOM with 82.1&#x0025; accuracy, 75&#x0025; sensitivity, and 91.3&#x0025; specificity. For distinguishing OL from OSCC, the top-performing model reached 82.1&#x0025; accuracy, 68.8&#x0025; sensitivity, and 91.3&#x0025; specificity. Finally, classification of NOM vs. OSCC achieved 89.7&#x0025; accuracy, 81.3&#x0025; sensitivity, and 95.7&#x0025; specificity (<xref ref-type="bibr" rid="B17">17</xref>). In parallel, Ghosh&#x0027;s deep reinforced neural network achieved an overall accuracy of 83.33&#x0025; and an area under the receiver operating characteristic curve (ROC) of 0.88 (<xref ref-type="bibr" rid="B18">18</xref>). Notably, class-specific accuracies for identifying NOM, OL, and OSCC were 83.3&#x0025;, 87&#x0025;, and 95.24&#x0025;, respectively (<xref ref-type="bibr" rid="B18">18</xref>)&#x2014;suggesting that model performance improved with increasing lesion severity, a trend consistent with the findings reported by Banerjee et al.</p>
<p>More recently, in 2025, Zhang et al. (<xref ref-type="bibr" rid="B23">23</xref>) developed AI models for the detection of OL, OSCC, and oral lichen planus (OLP) through the analysis of sub-diffuse reflectance spectroscopy measurements. Their study demonstrated that both SVM and probabilistic neural network (PNN) models yielded comparable performance in distinguishing NOM from abnormal oral mucosa. Furthermore, the models were able to accurately classify OL, OSCC, and OLP with high overall accuracy (0.83), sensitivity (0.85) and specificity (0.93).</p>
</sec>
<sec id="s3c"><title>Clinical image-based AI models</title>
<p>In addition to spectroscopy-derived features, AI has been increasingly applied to the analysis of clinical images of OL lesions. Jurczyszyn et al. developed a highly effective texture analysis algorithm capable of distinguishing clinical images of OL from NOM with 100&#x0025; sensitivity and 97&#x0025; specificity (<xref ref-type="bibr" rid="B25">25</xref>).</p>
<p>Another study by Ramesh et al. evaluated two convolutional neural network (CNN) models&#x2014;MobileNetV2 and Xception&#x2014;for image-based detection of OL and other common white lesions (<xref ref-type="bibr" rid="B21">21</xref>). Although both models achieved strong overall accuracies (92&#x0025; for MobileNetV2 and 89&#x0025; for Xception), MobileNetV2 demonstrated consistently higher sensitivity, both for non-homogeneous OL (92&#x0025; vs. 85&#x0025;) and for other white lesions (94&#x0025; vs. 91&#x0025;, respectively). These findings suggest that MobileNetV2 is better suited for identifying more challenging OL lesions (<xref ref-type="bibr" rid="B21">21</xref>).</p>
<p>Considering that OL often coexists with or mimics other mucosal conditions, Schw&#x00E4;rzler et al. marked a significant milestone in this field by evaluating whether a DL model could discriminate among 11 classes of oral lesions, including OL and proliferative verrucous leukoplakia (PVL) (<xref ref-type="bibr" rid="B24">24</xref>). The study also compared the model&#x0027;s diagnostic performance against that of general dentists and oral surgery specialists. For OL, the model achieved moderate performance, with a sensitivity of 0.59, specificity of 0.94, F1-score of 0.54, precision of 0.5, and an AUC-ROC 0.86 (<xref ref-type="bibr" rid="B24">24</xref>). In contrast, performance was notably better for PVL, with a sensitivity of 0.89, specificity of 0.97, F1-score of 0.64, precision of 0.5, and AUC-ROC 0.91. Frictional keratosis was frequently misclassified as OL or &#x201C;white&#x201D; OLP (<xref ref-type="bibr" rid="B24">24</xref>), underscoring the clinical difficulty of visually distinguishing between these lesions. In terms of lesion detection, the model performed better with PVL compared to OL (<xref ref-type="bibr" rid="B24">24</xref>). Finally, the model&#x0027;s overall diagnostic performance was comparable to oral surgeon specialists (<italic>p</italic>&#x2009;&#x003D;&#x2009;0.93), but significantly outperformed general dentists (<italic>p</italic>&#x2009;&#x003D;&#x2009;0.01) (<xref ref-type="bibr" rid="B24">24</xref>).</p>
<p>Schmidl et al. introduced a novel diagnostic framework using ChatGPT v4.0 to identify clinical images of OL and OSCC, both with and without accompanying clinical records (<xref ref-type="bibr" rid="B22">22</xref>). When relying solely on images, the model demonstrated a reliable performance for OL classification, achieving a sensitivity of 72.2&#x0025;, specificity of 92.6&#x0025;, and accuracy of 84.4&#x0025;. In contrast, its ability to classify OSCC under the same conditions was substantially lower, with a sensitivity of 18.2&#x0025;, specificity of 52.2&#x0025;, and accuracy of 35.6&#x0025;. Interestingly, providing the patients&#x2019; clinical records in the prompt greatly improved the model&#x0027;s diagnostic performance across all categories, highlighting the value of integrating visual and contextual data in AI-assisted diagnosis (<xref ref-type="bibr" rid="B22">22</xref>). For OL, sensitivity increased to 93.3&#x0025;, specificity to 96.7&#x0025;, and accuracy to 95.6&#x0025;; whereas for OSCC, sensitivity rose to 100&#x0025;, with specificity of 88.2&#x0025; and accuracy of 91.1&#x0025;.</p>
</sec>
<sec id="s3d"><title>Histopathological image- and data-based AI models</title>
<p>Histopathological analysis offers a deeper layer of diagnostic information, where AI has also shown considerable promise. In this context, Muniz de Lima et al. developed and evaluated deep CNN algorithms for distinguishing OL from OSCC, using histopathological images supplemented by demographic and clinical data (<xref ref-type="bibr" rid="B19">19</xref>). All samples were curated and examined by oral pathologists, who reached a consensus diagnosis for OL and OSCC through microscopic evaluation of biopsy specimens, therefore establishing a gold standard for algorithm training. Among the tested models, the RegNetY fusion with MetaBlock model achieved the highest accuracy (0.952), whereas the PiT and ResNetV2 models achieved the highest sensitivity (0.950) and AUC (0.991), respectively (<xref ref-type="bibr" rid="B19">19</xref>).</p>
<p>Another study investigated AI-based approaches for detecting and grading oral epithelial dysplasia (OED) in whole-slide samples of OL (<xref ref-type="bibr" rid="B20">20</xref>). The study used both whole-slide images and tissue microarray data to train and validate several CNN models, with all images labeled by expert pathologists serving as the gold standard. Among these, the EfficientNet-B0 model (E-MOD) demonstrated the best performance at the patch level; however, its performance declined at the slide level achieving an overall accuracy of 63.5&#x0025;, an AUC of 0.673, an average sensitivity of 86.1&#x0025;, and an average specificity of 64&#x0025; (<xref ref-type="bibr" rid="B20">20</xref>). To improve slide-level interpretability and diagnostic accuracy, the authors developed a two-stage system&#x2014;named E-MOD-plus&#x2014;combining 12 feature-specific CNNs to identify histopathological features of dysplasia, followed by a multiclass logistic regression model that integrated these features to predict the overall dysplasia grade. This approach achieved a superior slide-level performance compared to E-MOD alone, with an average accuracy of 86.5&#x0025;, an average AUC of 0.669, an average sensitivity of 70.6&#x0025;, and an average specificity of 79.4&#x0025; (<xref ref-type="bibr" rid="B20">20</xref>). Notably, the E-MOD-plus model also outperformed three junior oral pathologists in OED grading accuracy (<xref ref-type="bibr" rid="B20">20</xref>), highlighting its potential as an auxiliary tool for both identifying key histopathological features and accurately grading OED severity at the whole-slide level.</p>
</sec>
</sec>
<sec id="s4" sec-type="discussion"><title>Discussion</title>
<p>OL carries a malignant transformation risk ranging from 1&#x0025; to 40&#x0025; (<xref ref-type="bibr" rid="B26">26</xref>), underscoring the need for accurate diagnosis and risk stratification, which remains challenging due to its non-specific presentation. Current diagnosis relies heavily on exclusion, combined with a &#x201C;wait-and-see&#x201D; approach supplemented by biopsy. However, this method has important limitations, including histological overlap with other oral pathologies and the dependence on the pathologist&#x0027;s expertise, which introduces additional variability and subjectivity into the diagnostic process (<xref ref-type="bibr" rid="B27">27</xref>).</p>
<p>Recent advancements in computer sciences and AI-based technologies offer an opportunity to overcome these diagnostic barriers, enhancing the precision and reproducibility of OL diagnosis. Both ML and DL models have demonstrated significant value in dentistry, particularly by improving the detection of caries, periodontal disease, apical periodontitis, and salivary gland diseases, among others (<xref ref-type="bibr" rid="B28">28</xref>&#x2013;<xref ref-type="bibr" rid="B30">30</xref>). Overall, their diagnostic performance is able to match, and sometimes even surpass, that of experienced specialists, offering a promising complement to traditional diagnostic workflows (<xref ref-type="bibr" rid="B28">28</xref>&#x2013;<xref ref-type="bibr" rid="B30">30</xref>).</p>
<p>This scoping review identified ten studies that explored the application of AI in OL diagnosis, most of which were published within the past five years, reflecting both the growing interest and rapid technological evolution of this field. This momentum is further supported by initiatives like the MimoSA UPLOAD and MimoSA ANNOTATE tools, which provide a centralized platform for collecting and labeling oral lesion images from worldwide cohorts, thereby accelerating the development of AI algorithms capable of detecting high-risk oral potentially malignant disorders (<xref ref-type="bibr" rid="B31">31</xref>). Collectively, the included studies spanned multiple regions, including India, China, Brazil, Germany, Poland, and the Netherlands, illustrating broad interdisciplinary and international engagement in the field. However, underrepresentation of other regions and populations may introduce bias into the current evidence landscape and limit the global generalizability of results.</p>
<p>Despite the limited number of eligible investigations included in this work (<xref ref-type="bibr" rid="B16">16</xref>&#x2013;<xref ref-type="bibr" rid="B25">25</xref>), the findings consistently support the utility of AI-based tools for OL diagnosis. When differentiating OL from NOM, most models achieved moderate-to-high diagnostic performance, with sensitivity, specificity and accuracy values typically exceeding 80&#x0025;. Comparable results were observed when distinguishing OL from other mimicking conditions, such as OSCC, PVL and other white lesions. Notably, models tended to perform more robustly when identifying clinically complex or advanced lesions, a pattern observed across both spectroscopy- and image-based studies (<xref ref-type="bibr" rid="B18">18</xref>, <xref ref-type="bibr" rid="B24">24</xref>). However, this does not necessarily indicate that AI models inherently struggle to recognize subtle presentations of OL <italic>per se</italic>, but rather limitations in training data diversity, underrepresentation of mild lesions within datasets, or differences in model generalization and pattern recognition capabilities (<xref ref-type="bibr" rid="B32">32</xref>, <xref ref-type="bibr" rid="B33">33</xref>). Indeed, advanced disease presentations such as OSCC and PVL often display more distinct anatomical or spectroscopic alterations (<xref ref-type="bibr" rid="B34">34</xref>, <xref ref-type="bibr" rid="B35">35</xref>), which AI algorithms can more readily recognize and learn to classify.</p>
<p>To improve performance across the full clinical spectrum of OL, future research should prioritize the inclusion of subtle OL cases during model training and validation. Expanding dataset variability through intentional sampling, multicenter collaborations, and open-access data-sharing initiatives could substantially reduce current biases. Furthermore, applying data augmentation strategies, such as image transformations or contrastive learning with clustering techniques, may help to balance underrepresented categories and enhance model generalizability to less conspicuous lesions (<xref ref-type="bibr" rid="B36">36</xref>, <xref ref-type="bibr" rid="B37">37</xref>). Integrating molecular biomarker data, particularly those reflecting early epithelial alterations and subepithelial microenvironmental changes associated with malignant transformation (an aspect still largely overlooked in OL research) (<xref ref-type="bibr" rid="B26">26</xref>), could further improve OL detection and support risk stratification in the future. This multimodal strategy makes sense as it mirrors clinical reasoning, where visual, contextual and molecular information collectively inform diagnostic judgements.</p>
<p>Beyond data quantity and diversity, the type of data used also plays a critical role in shaping AI performance. In this review, the included investigations used a variety of data modalities including spectroscopy (<xref ref-type="bibr" rid="B16">16</xref>&#x2013;<xref ref-type="bibr" rid="B18">18</xref>, <xref ref-type="bibr" rid="B23">23</xref>), clinical photographs (<xref ref-type="bibr" rid="B21">21</xref>, <xref ref-type="bibr" rid="B22">22</xref>, <xref ref-type="bibr" rid="B24">24</xref>, <xref ref-type="bibr" rid="B25">25</xref>) and histopathological images (<xref ref-type="bibr" rid="B19">19</xref>, <xref ref-type="bibr" rid="B20">20</xref>). In some cases, these were combined with electronic health records and/or genomic data, which appeared to enhance the model&#x0027;s diagnostic performance (<xref ref-type="bibr" rid="B19">19</xref>, <xref ref-type="bibr" rid="B22">22</xref>). For example, Muniz de Lima et al. reported a 30.7&#x0025; increase in OL identification accuracy when clinical data were integrated with histopathological images, compared with image-only approaches (<xref ref-type="bibr" rid="B19">19</xref>). Supporting evidence from other medical fields, points in a similar direction: in dermatology, fusion of dermatoscopic images with clinical data has been shown to outperform unimodal models, especially when using advanced fusion methods such as cross-attention (<xref ref-type="bibr" rid="B38">38</xref>). Taken together, these findings highlight multimodal data integration as a promising and likely necessary direction for developing clinically robust AI tools for OL diagnosis in the future.</p>
<p>Diagnostic performance also varied according to the underlying algorithmic architecture. Early spectroscopy-based approaches that relied on traditional ML methods demonstrated promising sensitivity and specificity but often lacked correlation with lesion morphology or histopathological grade (<xref ref-type="bibr" rid="B16">16</xref>). Subsequent models combining FTIR and Raman analyses achieved higher class-specific accuracies; however, these remained at a proof-of-concept stage and have yet to progress toward clinical validation (<xref ref-type="bibr" rid="B18">18</xref>). In contrast, image- and histopathology-based models employing advanced DL architectures, particularly CNNs and hybrid fusion systems such as E-MOD-plus frameworks, achieved near-human or even superior diagnostic performance, outperforming general dentists in OL identification and some oral pathologists in epithelial dysplasia grading (<xref ref-type="bibr" rid="B20">20</xref>, <xref ref-type="bibr" rid="B24">24</xref>). Nevertheless, to date, only one of these studies has validated its findings in an external population (<xref ref-type="bibr" rid="B20">20</xref>), underscoring once more the limited generalizability of current models and the need for further multicenter validation efforts.</p>
<p>Finally, it is important to note that none of the AI systems reviewed have been deployed in clinical settings to date. Most remain in preliminary phases of development, with only one study (<xref ref-type="bibr" rid="B20">20</xref>) having conducted external validation. The remaining models rely heavily on internal datasets, limiting their generalizability. This highlights a significant translational gap, making clear that these AI models are not yet suitable for use without expert clinical oversight. Moreover, integrating AI workflows into the diagnosis of OL introduces important ethical and practical challenges that warrant explicit discussion. For instance, high-resolution images may unintentionally expose identifiable facial features, raising serious privacy concerns. Algorithmic bias is another concern, as most models have been developed using datasets from specific ethnic populations (primarily Indian, Chinese and German), which may hinder performance across diverse or underserved groups, contributing to diagnostic inequities.</p>
<sec id="s4a"><title>Limitations</title>
<p>The findings of this work should be interpreted with caution, given the inherent limitations of scoping reviews. Despite a broad research question and comprehensive electronic search, only ten studies met the eligibility criteria, restricting the generalizability of findings across diverse clinical settings. In addition, no formal quality or risk of bias assessment was performed, making it difficult to judge the overall reliability and strength of the evidence base. The included studies displayed notable methodological differences that limit comparability. Crucially, the reproducibility of findings is compromised by insufficient reporting of key technical details, such as data preprocessing steps (e.g., image normalization, resizing, or augmentation) and model development strategies (e.g., hyperparameter tuning). Furthermore, reference standards significantly varied across studies, ranging from consensus histopathological diagnosis to clinical impression alone, making it difficult to reliably assess the true diagnostic validity for OL, OSCC, NOM, and PVL. Taken together, these factors highlight the need for greater standardization, transparency, and methodological rigor in future research to strengthen confidence in this emerging field.</p>
</sec>
</sec>
<sec id="s5" sec-type="conclusions"><title>Conclusion</title>
<p>Results from this scoping review underscore the considerable promise of AI-based tools as adjuncts for both the clinical and histopathological diagnosis of OL, with reported sensitivity, specificity and accuracy values frequently exceeding 80&#x0025;. However, the current body of evidence remains at an early developmental stage, characterized by substantial methodological heterogeneity, small sample sizes, and limited external validation. Crucially, current evidence supports the use of AI only as a decision-support system and does not support its independent clinical use, as these tools must always be interpreted by qualified clinicians. To advance toward clinical translation, future research should prioritize standardized reporting frameworks and the external validation of high-performing algorithms in larger, more diverse, and geographically representative cohorts. Furthermore, efforts to enhance model performance across the full clinical spectrum of OL (particularly in subtle presentations), will be essential to improve diagnosis and timely intervention of OL.</p>
</sec>
</body>
<back>
<sec id="s6" sec-type="author-contributions"><title>Author contributions</title>
<p>CJ: Writing &#x2013; review &#x0026; editing, Conceptualization, Supervision, Project administration, Writing &#x2013; original draft, Methodology, Visualization. CL: Writing &#x2013; review &#x0026; editing, Writing &#x2013; original draft, Data curation. TN: Writing &#x2013; review &#x0026; editing, Methodology, Writing &#x2013; original draft, Data curation. AF: Writing &#x2013; review &#x0026; editing, Writing &#x2013; original draft. RM-F: Supervision, Writing &#x2013; original draft, Visualization, Writing &#x2013; review &#x0026; editing. SN: Methodology, Writing &#x2013; review &#x0026; editing, Conceptualization, Writing &#x2013; original draft, Visualization, Project administration.</p>
</sec>
<sec id="s8" sec-type="COI-statement"><title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
<p>The author SN declared that they were an editorial board member of Frontiers at the time of submission. This had no impact on the peer review process and the final decision.</p>
<p>The handling editor WAGA declared a past co-authorship with the author(s) RAMF, SEN.</p>
</sec>
<sec id="s9" sec-type="ai-statement"><title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec id="s10" sec-type="disclaimer"><title>Publisher&#x0027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list><title>References</title>
<ref id="B1"><label>1.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Warnakulasuriya</surname> <given-names>S</given-names></name> <name><surname>Kujan</surname> <given-names>O</given-names></name> <name><surname>Aguirre-Urizar</surname> <given-names>JM</given-names></name> <name><surname>Bagan</surname> <given-names>JV</given-names></name> <name><surname>Gonzalez-Moles</surname> <given-names>MA</given-names></name> <name><surname>Kerr</surname> <given-names>AR</given-names></name><etal/></person-group> <article-title>Oral potentially malignant disorders: a consensus report from an international seminar on nomenclature and classification, convened by the WHO collaborating centre for oral cancer</article-title>. <source>Oral Dis</source>. (<year>2021</year>) <volume>27</volume>(<issue>8</issue>):<fpage>1862</fpage>&#x2013;<lpage>80</lpage>. <pub-id pub-id-type="doi">10.1111/odi.13704</pub-id><pub-id pub-id-type="pmid">33128420</pub-id></mixed-citation></ref>
<ref id="B2"><label>2.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>C</given-names></name> <name><surname>Li</surname> <given-names>B</given-names></name> <name><surname>Zeng</surname> <given-names>X</given-names></name> <name><surname>Hu</surname> <given-names>X</given-names></name> <name><surname>Hua</surname> <given-names>H</given-names></name></person-group>. <article-title>The global prevalence of oral leukoplakia: a systematic review and meta-analysis from 1996 to 2022</article-title>. <source>BMC Oral Health</source>. (<year>2023</year>) <volume>23</volume>(<issue>1</issue>):<fpage>645</fpage>. <pub-id pub-id-type="doi">10.1186/s12903-023-03342-y</pub-id><pub-id pub-id-type="pmid">37670255</pub-id></mixed-citation></ref>
<ref id="B3"><label>3.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Iocca</surname> <given-names>O</given-names></name> <name><surname>Sollecito</surname> <given-names>TP</given-names></name> <name><surname>Alawi</surname> <given-names>F</given-names></name> <name><surname>Weinstein</surname> <given-names>GS</given-names></name> <name><surname>Newman</surname> <given-names>JG</given-names></name> <name><surname>De Virgilio</surname> <given-names>A</given-names></name><etal/></person-group> <article-title>Potentially malignant disorders of the oral cavity and oral dysplasia: a systematic review and meta-analysis of malignant transformation rate by subtype</article-title>. <source>Head Neck</source>. (<year>2020</year>) <volume>42</volume>(<issue>3</issue>):<fpage>539</fpage>&#x2013;<lpage>55</lpage>. <pub-id pub-id-type="doi">10.1002/hed.26006</pub-id><pub-id pub-id-type="pmid">31803979</pub-id></mixed-citation></ref>
<ref id="B4"><label>4.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kumari</surname> <given-names>P</given-names></name> <name><surname>Debta</surname> <given-names>P</given-names></name> <name><surname>Dixit</surname> <given-names>A</given-names></name></person-group>. <article-title>Oral potentially malignant disorders: etiology, pathogenesis, and transformation into oral cancer</article-title>. <source>Front Pharmacol</source>. (<year>2022</year>) <volume>13</volume>:<fpage>825266</fpage>. <pub-id pub-id-type="doi">10.3389/fphar.2022.825266</pub-id><pub-id pub-id-type="pmid">35517828</pub-id></mixed-citation></ref>
<ref id="B5"><label>5.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Napier</surname> <given-names>SS</given-names></name> <name><surname>Speight</surname> <given-names>PM</given-names></name></person-group>. <article-title>Natural history of potentially malignant oral lesions and conditions: an overview of the literature</article-title>. <source>J Oral Pathol Med</source>. (<year>2008</year>) <volume>37</volume>(<issue>1</issue>):<fpage>1</fpage>&#x2013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1111/j.1600-0714.2007.00579.x</pub-id><pub-id pub-id-type="pmid">18154571</pub-id></mixed-citation></ref>
<ref id="B6"><label>6.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Warnakulasuriya</surname> <given-names>S</given-names></name> <name><surname>Ariyawardana</surname> <given-names>A</given-names></name></person-group>. <article-title>Malignant transformation of oral leukoplakia: a systematic review of observational studies</article-title>. <source>J Oral Pathol Med</source>. (<year>2016</year>) <volume>45</volume>(<issue>3</issue>):<fpage>155</fpage>&#x2013;<lpage>66</lpage>. <pub-id pub-id-type="doi">10.1111/jop.12339</pub-id><pub-id pub-id-type="pmid">26189354</pub-id></mixed-citation></ref>
<ref id="B7"><label>7.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lodi</surname> <given-names>G</given-names></name> <name><surname>Porter</surname> <given-names>S</given-names></name></person-group>. <article-title>Management of potentially malignant disorders: evidence and critique</article-title>. <source>J Oral Pathol Med</source>. (<year>2008</year>) <volume>37</volume>(<issue>2</issue>):<fpage>63</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1111/j.1600-0714.2007.00575.x</pub-id><pub-id pub-id-type="pmid">18197849</pub-id></mixed-citation></ref>
<ref id="B8"><label>8.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>van der Waal</surname> <given-names>I</given-names></name></person-group>. <article-title>Oral leukoplakia: present views on diagnosis, management, communication with patients, and research</article-title>. <source>Curr Oral Health Rep</source>. (<year>2019</year>) <volume>6</volume>(<issue>1</issue>):<fpage>9</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.1007/s40496-019-0204-8</pub-id></mixed-citation></ref>
<ref id="B9"><label>9.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sahoo</surname> <given-names>RK</given-names></name> <name><surname>Sahoo</surname> <given-names>KC</given-names></name> <name><surname>Dash</surname> <given-names>GC</given-names></name> <name><surname>Kumar</surname> <given-names>G</given-names></name> <name><surname>Baliarsingh</surname> <given-names>SK</given-names></name> <name><surname>Panda</surname> <given-names>B</given-names></name><etal/></person-group> <article-title>Diagnostic performance of artificial intelligence in detecting oral potentially malignant disorders and oral cancer using medical diagnostic imaging: a systematic review and meta-analysis</article-title>. <source>Front Oral Health</source>. (<year>2024</year>) <volume>5</volume>:<fpage>1494867</fpage>. <pub-id pub-id-type="doi">10.3389/froh.2024.1494867</pub-id><pub-id pub-id-type="pmid">39568787</pub-id></mixed-citation></ref>
<ref id="B10"><label>10.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Suleimenov</surname> <given-names>IE</given-names></name> <name><surname>Vitulyova</surname> <given-names>YS</given-names></name> <name><surname>Bakirov</surname> <given-names>AS</given-names></name> <name><surname>Gabrielyan</surname> <given-names>OA</given-names></name></person-group>. <article-title>Artificial intelligence: what is it?</article-title> <conf-name>Proceedings of the 2020 6th International Conference on Computer and Technology Applications; Antalya, Turkey: Association for Computing Machinery</conf-name> (<year>2020</year>). p. <fpage>22</fpage>&#x2013;<lpage>5</lpage></mixed-citation></ref>
<ref id="B11"><label>11.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jordan</surname> <given-names>MI</given-names></name> <name><surname>Mitchell</surname> <given-names>TM</given-names></name></person-group>. <article-title>Machine learning: trends, perspectives, and prospects</article-title>. <source>Science</source>. (<year>2015</year>) <volume>349</volume>(<issue>6245</issue>):<fpage>255</fpage>&#x2013;<lpage>60</lpage>. <pub-id pub-id-type="doi">10.1126/science.aaa8415</pub-id><pub-id pub-id-type="pmid">26185243</pub-id></mixed-citation></ref>
<ref id="B12"><label>12.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>X</given-names></name> <name><surname>Zhao</surname> <given-names>Y</given-names></name> <name><surname>Pourpanah</surname> <given-names>F</given-names></name></person-group>. <article-title>Recent advances in deep learning</article-title>. <source>Int J Mach Learn Cybern</source>. (<year>2020</year>) <volume>11</volume>(<issue>4</issue>):<fpage>747</fpage>&#x2013;<lpage>50</lpage>. <pub-id pub-id-type="doi">10.1007/s13042-020-01096-5</pub-id></mixed-citation></ref>
<ref id="B13"><label>13.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rajaram Mohan</surname> <given-names>K</given-names></name> <name><surname>Mathew Fenn</surname> <given-names>S</given-names></name></person-group>. <article-title>Artificial intelligence and its theranostic applications in dentistry</article-title>. <source>Cureus</source>. (<year>2023</year>) <volume>15</volume>(<issue>5</issue>):<fpage>e38711</fpage>. <pub-id pub-id-type="doi">10.7759/cureus.38711</pub-id><pub-id pub-id-type="pmid">37292569</pub-id></mixed-citation></ref>
<ref id="B14"><label>14.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>X-L</given-names></name> <name><surname>Zhou</surname> <given-names>G</given-names></name></person-group>. <article-title>Deep learning in the diagnosis and prognosis of oral potentially malignant disorders</article-title>. <source>Cancer Screen Prev</source>. (<year>2024</year>) <volume>3</volume>(<issue>4</issue>):<fpage>203</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.14218/CSP.2024.00025</pub-id></mixed-citation></ref>
<ref id="B15"><label>15.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Peters</surname> <given-names>MDJ</given-names></name> <name><surname>Marnie</surname> <given-names>C</given-names></name> <name><surname>Tricco</surname> <given-names>AC</given-names></name> <name><surname>Pollock</surname> <given-names>D</given-names></name> <name><surname>Munn</surname> <given-names>Z</given-names></name> <name><surname>Alexander</surname> <given-names>L</given-names></name><etal/></person-group> <article-title>Updated methodological guidance for the conduct of scoping reviews</article-title>. <source>JBI Evid Synth</source>. (<year>2020</year>) <volume>18</volume>(<issue>10</issue>):<fpage>2119</fpage>&#x2013;<lpage>26</lpage>. <pub-id pub-id-type="doi">10.11124/JBIES-20-00167</pub-id><pub-id pub-id-type="pmid">33038124</pub-id></mixed-citation></ref>
<ref id="B16"><label>16.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>van Staveren</surname> <given-names>HJ</given-names></name> <name><surname>van Veen</surname> <given-names>RL</given-names></name> <name><surname>Speelman</surname> <given-names>OC</given-names></name> <name><surname>Witjes</surname> <given-names>MJ</given-names></name> <name><surname>Star</surname> <given-names>WM</given-names></name> <name><surname>Roodenburg</surname> <given-names>JL</given-names></name></person-group>. <article-title>Classification of clinical autofluorescence spectra of oral leukoplakia using an artificial neural network: a pilot study</article-title>. <source>Oral Oncol</source>. (<year>2000</year>) <volume>36</volume>(<issue>3</issue>):<fpage>286</fpage>&#x2013;<lpage>93</lpage>. <pub-id pub-id-type="doi">10.1016/S1368-8375(00)00004-X</pub-id><pub-id pub-id-type="pmid">10793332</pub-id></mixed-citation></ref>
<ref id="B17"><label>17.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Banerjee</surname> <given-names>S</given-names></name> <name><surname>Pal</surname> <given-names>M</given-names></name> <name><surname>Chakrabarty</surname> <given-names>J</given-names></name> <name><surname>Petibois</surname> <given-names>C</given-names></name> <name><surname>Paul</surname> <given-names>RR</given-names></name> <name><surname>Giri</surname> <given-names>A</given-names></name><etal/></person-group> <article-title>Fourier-transform-infrared-spectroscopy based spectral-biomarker selection towards optimum diagnostic differentiation of oral leukoplakia and cancer</article-title>. <source>Anal Bioanal Chem</source>. (<year>2015</year>) <volume>407</volume>(<issue>26</issue>):<fpage>7935</fpage>&#x2013;<lpage>43</lpage>. <pub-id pub-id-type="doi">10.1007/s00216-015-8960-3</pub-id><pub-id pub-id-type="pmid">26342309</pub-id></mixed-citation></ref>
<ref id="B18"><label>18.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ghosh</surname> <given-names>A</given-names></name> <name><surname>Chaudhuri</surname> <given-names>D</given-names></name> <name><surname>Adhikary</surname> <given-names>S</given-names></name> <name><surname>Chatterjee</surname> <given-names>K</given-names></name> <name><surname>Roychowdhury</surname> <given-names>A</given-names></name> <name><surname>Das</surname> <given-names>AK</given-names></name><etal/></person-group> <article-title>Deep reinforced neural network model for cyto-spectroscopic analysis of epigenetic markers for automated oral cancer risk prediction</article-title>. <source>Chemom Intell Lab Syst</source>. (<year>2022</year>) <volume>224</volume>:<fpage>104548</fpage>. <pub-id pub-id-type="doi">10.1016/j.chemolab.2022.104548</pub-id></mixed-citation></ref>
<ref id="B19"><label>19.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>de Lima</surname> <given-names>LM</given-names></name> <name><surname>de Assis</surname> <given-names>MCFR</given-names></name> <name><surname>Soares</surname> <given-names>JP</given-names></name> <name><surname>Gr&#x00E3;o-Velloso</surname> <given-names>TR</given-names></name> <name><surname>de Barros</surname> <given-names>LAP</given-names></name> <name><surname>Camisasca</surname> <given-names>DR</given-names></name><etal/></person-group> <article-title>Importance of complementary data to histopathological image analysis of oral leukoplakia and carcinoma using deep neural networks</article-title>. <source>Intelligent Medicine</source>. (<year>2023</year>) <volume>3</volume>(<issue>4</issue>):<fpage>258</fpage>&#x2013;<lpage>66</lpage>. <pub-id pub-id-type="doi">10.1016/j.imed.2023.01.004</pub-id></mixed-citation></ref>
<ref id="B20"><label>20.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Peng</surname> <given-names>J</given-names></name> <name><surname>Xu</surname> <given-names>Z</given-names></name> <name><surname>Dan</surname> <given-names>H</given-names></name> <name><surname>Li</surname> <given-names>J</given-names></name> <name><surname>Wang</surname> <given-names>J</given-names></name> <name><surname>Luo</surname> <given-names>X</given-names></name><etal/></person-group> <article-title>Oral epithelial dysplasia detection and grading in oral leukoplakia using deep learning</article-title>. <source>BMC Oral Health</source>. (<year>2024</year>) <volume>24</volume>(<issue>1</issue>):<fpage>434</fpage>. <pub-id pub-id-type="doi">10.1186/s12903-024-04191-z</pub-id><pub-id pub-id-type="pmid">38594651</pub-id></mixed-citation></ref>
<ref id="B21"><label>21.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ramesh</surname> <given-names>E</given-names></name> <name><surname>Ganesan</surname> <given-names>A</given-names></name> <name><surname>Lakshmi</surname> <given-names>KC</given-names></name> <name><surname>Natarajan</surname> <given-names>PM</given-names></name></person-group>. <article-title>Artificial intelligence-based diagnosis of oral leukoplakia using deep convolutional neural networks Xception and MobileNet-v2</article-title>. <source>Front Oral Health</source>. (<year>2025</year>) <volume>6</volume>:<fpage>1414524</fpage>. <pub-id pub-id-type="doi">10.3389/froh.2025.1414524</pub-id><pub-id pub-id-type="pmid">40191066</pub-id></mixed-citation></ref>
<ref id="B22"><label>22.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schmidl</surname> <given-names>B</given-names></name> <name><surname>Hutten</surname> <given-names>T</given-names></name> <name><surname>Pigorsch</surname> <given-names>S</given-names></name> <name><surname>Stogbauer</surname> <given-names>F</given-names></name> <name><surname>Hoch</surname> <given-names>CC</given-names></name> <name><surname>Hussain</surname> <given-names>T</given-names></name><etal/></person-group> <article-title>Artificial intelligence for image recognition in diagnosing oral and oropharyngeal cancer and leukoplakia</article-title>. <source>Sci Rep</source>. (<year>2025</year>) <volume>15</volume>(<issue>1</issue>):<fpage>3625</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-025-85920-4</pub-id><pub-id pub-id-type="pmid">39880876</pub-id></mixed-citation></ref>
<ref id="B23"><label>23.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>L</given-names></name> <name><surname>Chang</surname> <given-names>Q</given-names></name> <name><surname>Zhang</surname> <given-names>Q</given-names></name> <name><surname>Zou</surname> <given-names>S</given-names></name> <name><surname>Liu</surname> <given-names>D</given-names></name> <name><surname>Gao</surname> <given-names>F</given-names></name><etal/></person-group> <article-title>Sub-diffuse reflectance spectroscopy combined with machine learning method for oral mucosal disease identification</article-title>. <source>Lasers Surg Med</source>. (<year>2025</year>) <volume>57</volume>(<issue>4</issue>):<fpage>339</fpage>&#x2013;<lpage>51</lpage>. <pub-id pub-id-type="doi">10.1002/lsm.70011</pub-id><pub-id pub-id-type="pmid">40197749</pub-id></mixed-citation></ref>
<ref id="B24"><label>24.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schwarzler</surname> <given-names>J</given-names></name> <name><surname>Tolstaya</surname> <given-names>E</given-names></name> <name><surname>Tichy</surname> <given-names>A</given-names></name> <name><surname>Paris</surname> <given-names>S</given-names></name> <name><surname>Aarabi</surname> <given-names>G</given-names></name> <name><surname>Chaurasia</surname> <given-names>A</given-names></name><etal/></person-group> <article-title>Machine learning versus clinicians for detection and classification of oral mucosal lesions</article-title>. <source>J Dent</source>. (<year>2025</year>) <volume>161</volume>:<fpage>105992</fpage>. <pub-id pub-id-type="doi">10.1016/j.jdent.2025.105992</pub-id><pub-id pub-id-type="pmid">40695439</pub-id></mixed-citation></ref>
<ref id="B25"><label>25.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jurczyszyn</surname> <given-names>K</given-names></name> <name><surname>Gedrange</surname> <given-names>T</given-names></name> <name><surname>Kozakiewicz</surname> <given-names>M</given-names></name></person-group>. <article-title>Theoretical background to automated diagnosing of oral leukoplakia: a preliminary report</article-title>. <source>J Healthc Eng</source>. (<year>2020</year>) <volume>2020</volume>:<fpage>8831161</fpage>. <pub-id pub-id-type="doi">10.1155/2020/8831161</pub-id><pub-id pub-id-type="pmid">33005316</pub-id></mixed-citation></ref>
<ref id="B26"><label>26.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gonzalez-Arriagada</surname> <given-names>WA</given-names></name> <name><surname>Canedo-Marroquin</surname> <given-names>G</given-names></name> <name><surname>Adorno-Farias</surname> <given-names>D</given-names></name> <name><surname>Fernandez-Ramires</surname> <given-names>R</given-names></name></person-group>. <article-title>New insights into the role of the oral leukoplakia microenvironment in malignant transformation</article-title>. <source>Front Oral Health</source>. (<year>2024</year>) <volume>5</volume>:<fpage>1363052</fpage>. <pub-id pub-id-type="doi">10.3389/froh.2024.1363052</pub-id><pub-id pub-id-type="pmid">38450102</pub-id></mixed-citation></ref>
<ref id="B27"><label>27.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>van der Waal</surname> <given-names>I</given-names></name></person-group>. <article-title>Oral leukoplakia: A diagnostic challenge for clinicians and pathologists</article-title>. <source>Oral Dis</source>. (<year>2019</year>) <volume>25</volume>(<issue>1</issue>):<fpage>348</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1111/odi.12976</pub-id><pub-id pub-id-type="pmid">30203899</pub-id></mixed-citation></ref>
<ref id="B28"><label>28.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ding</surname> <given-names>H</given-names></name> <name><surname>Wu</surname> <given-names>J</given-names></name> <name><surname>Zhao</surname> <given-names>W</given-names></name> <name><surname>Matinlinna</surname> <given-names>JP</given-names></name> <name><surname>Burrow</surname> <given-names>MF</given-names></name> <name><surname>Tsoi</surname> <given-names>JKH</given-names></name></person-group>. <article-title>Artificial intelligence in dentistry-A review</article-title>. <source>Front Dent Med</source>. (<year>2023</year>) <volume>4</volume>:<fpage>1085251</fpage>. <pub-id pub-id-type="doi">10.3389/fdmed.2023.1085251</pub-id><pub-id pub-id-type="pmid">39935549</pub-id></mixed-citation></ref>
<ref id="B29"><label>29.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Khanagar</surname> <given-names>SB</given-names></name> <name><surname>Al-Ehaideb</surname> <given-names>A</given-names></name> <name><surname>Maganur</surname> <given-names>PC</given-names></name> <name><surname>Vishwanathaiah</surname> <given-names>S</given-names></name> <name><surname>Patil</surname> <given-names>S</given-names></name> <name><surname>Baeshen</surname> <given-names>HA</given-names></name><etal/></person-group> <article-title>Developments, application, and performance of artificial intelligence in dentistry - A systematic review</article-title>. <source>J Dent Sci</source>. (<year>2021</year>) <volume>16</volume>(<issue>1</issue>):<fpage>508</fpage>&#x2013;<lpage>22</lpage>. <pub-id pub-id-type="doi">10.1016/j.jds.2020.06.019</pub-id><pub-id pub-id-type="pmid">33384840</pub-id></mixed-citation></ref>
<ref id="B30"><label>30.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Basso</surname> <given-names>A</given-names></name> <name><surname>Salas</surname> <given-names>F</given-names></name> <name><surname>Hernandez</surname> <given-names>M</given-names></name> <name><surname>Fernandez</surname> <given-names>A</given-names></name> <name><surname>Sierra</surname> <given-names>A</given-names></name> <name><surname>Jimenez</surname> <given-names>C</given-names></name></person-group>. <article-title>Machine learning and deep learning models for the diagnosis of apical periodontitis: a scoping review</article-title>. <source>Clin Oral Investig</source>. (<year>2024</year>) <volume>28</volume>(<issue>11</issue>):<fpage>600</fpage>. <pub-id pub-id-type="doi">10.1007/s00784-024-05989-5</pub-id><pub-id pub-id-type="pmid">39419893</pub-id></mixed-citation></ref>
<ref id="B31"><label>31.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Alotaibi</surname> <given-names>S</given-names></name> <name><surname>Deligianni</surname> <given-names>E</given-names></name></person-group>. <article-title>AI in oral medicine: is the future already here? A literature review</article-title>. <source>Br Dent J</source>. (<year>2024</year>) <volume>237</volume>(<issue>10</issue>):<fpage>765</fpage>&#x2013;<lpage>70</lpage>. <pub-id pub-id-type="doi">10.1038/s41415-024-8029-9</pub-id><pub-id pub-id-type="pmid">39572810</pub-id></mixed-citation></ref>
<ref id="B32"><label>32.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tang</surname> <given-names>H</given-names></name> <name><surname>Sun</surname> <given-names>N</given-names></name> <name><surname>Shen</surname> <given-names>S</given-names></name></person-group>. <article-title>Improving generalization of deep learning models for diagnostic pathology by increasing variability in training data: experiments on osteosarcoma subtypes</article-title>. <source>J Pathol Inform</source>. (<year>2021</year>) <volume>12</volume>:<fpage>30</fpage>. <pub-id pub-id-type="doi">10.4103/jpi.jpi_78_20</pub-id><pub-id pub-id-type="pmid">34497734</pub-id></mixed-citation></ref>
<ref id="B33"><label>33.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hatta</surname> <given-names>S</given-names></name> <name><surname>Ichiuji</surname> <given-names>Y</given-names></name> <name><surname>Mabu</surname> <given-names>S</given-names></name> <name><surname>Kugler</surname> <given-names>M</given-names></name> <name><surname>Hontani</surname> <given-names>H</given-names></name> <name><surname>Okoshi</surname> <given-names>T</given-names></name><etal/></person-group> <article-title>Improved artificial intelligence discrimination of minor histological populations by supplementing with color-adjusted images</article-title>. <source>Sci Rep</source>. (<year>2023</year>) <volume>13</volume>(<issue>1</issue>):<fpage>19068</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-023-46472-7</pub-id><pub-id pub-id-type="pmid">37925580</pub-id></mixed-citation></ref>
<ref id="B34"><label>34.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Arroyo</surname> <given-names>E</given-names></name> <name><surname>Perez Sayans</surname> <given-names>M</given-names></name> <name><surname>Bravo</surname> <given-names>SB</given-names></name> <name><surname>de Oliveira Barbeiro</surname> <given-names>C</given-names></name> <name><surname>Paravani Palacon</surname> <given-names>M</given-names></name> <name><surname>Chamorro Petronacci</surname> <given-names>CM</given-names></name><etal/></person-group> <article-title>Identification of proteomic biomarkers in proliferative verrucous leukoplakia through liquid chromatography with tandem mass spectrometry</article-title>. <source>Lab Invest</source>. (<year>2023</year>) <volume>103</volume>(<issue>10</issue>):<fpage>100222</fpage>. <pub-id pub-id-type="doi">10.1016/j.labinv.2023.100222</pub-id><pub-id pub-id-type="pmid">37507024</pub-id></mixed-citation></ref>
<ref id="B35"><label>35.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sharma</surname> <given-names>PN</given-names></name> <name><surname>Chaudhary</surname> <given-names>M</given-names></name> <name><surname>Patel</surname> <given-names>SA</given-names></name> <name><surname>Zade</surname> <given-names>PR</given-names></name></person-group>. <article-title>Screening of oral squamous cell carcinoma through color intensity-based textural features</article-title>. <source>Cureus</source>. (<year>2024</year>) <volume>16</volume>(<issue>3</issue>):<fpage>e56682</fpage>. <pub-id pub-id-type="doi">10.7759/cureus.56682</pub-id><pub-id pub-id-type="pmid">38646364</pub-id></mixed-citation></ref>
<ref id="B36"><label>36.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Alam</surname> <given-names>MN</given-names></name> <name><surname>Yamashita</surname> <given-names>R</given-names></name> <name><surname>Ramesh</surname> <given-names>V</given-names></name> <name><surname>Prabhune</surname> <given-names>T</given-names></name> <name><surname>Lim</surname> <given-names>JI</given-names></name> <name><surname>Chan</surname> <given-names>RVP</given-names></name><etal/></person-group> <article-title>Contrastive learning-based pretraining improves representation and transferability of diabetic retinopathy classification models</article-title>. <source>Sci Rep</source>. (<year>2023</year>) <volume>13</volume>(<issue>1</issue>):<fpage>6047</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-023-33365-y</pub-id><pub-id pub-id-type="pmid">37055475</pub-id></mixed-citation></ref>
<ref id="B37"><label>37.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Xu</surname> <given-names>X</given-names></name> <name><surname>Wong</surname> <given-names>STC</given-names></name></person-group>. <article-title>Contrastive learning in brain imaging</article-title>. <source>Comput Med Imaging Graph</source>. (<year>2025</year>) <volume>121</volume>:<fpage>102500</fpage>. <pub-id pub-id-type="doi">10.1016/j.compmedimag.2025.102500</pub-id><pub-id pub-id-type="pmid">39889467</pub-id></mixed-citation></ref>
<ref id="B38"><label>38.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Das</surname> <given-names>A</given-names></name> <name><surname>Agarwal</surname> <given-names>V</given-names></name> <name><surname>Shetty</surname> <given-names>NP</given-names></name></person-group>. <article-title>Comparative analysis of multimodal architectures for effective skin lesion detection using clinical and image data</article-title>. <source>Front Artif Intell</source>. (<year>2025</year>) <volume>8</volume>:<fpage>1608837</fpage>. <pub-id pub-id-type="doi">10.3389/frai.2025.1608837</pub-id><pub-id pub-id-type="pmid">40951333</pub-id></mixed-citation></ref></ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by"><p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/885655/overview">Wilfredo Alejandro Gonz&#x00E1;lez-Arriagada</ext-link>, University of the Andes, Chile</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by"><p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1007669/overview">Weeraya Tantanapornkul</ext-link>, Naresuan University, Thailand</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2847082/overview">Sanjeev Balappa Khanagar</ext-link>, King Saud bin Abdulaziz University for Health Sciences, Saudi Arabia</p></fn>
</fn-group>
</back>
</article>