<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article article-type="review-article" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Dent. Med.</journal-id><journal-title-group>
<journal-title>Frontiers in Dental Medicine</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Dent. Med.</abbrev-journal-title></journal-title-group>
<issn pub-type="epub">2673-4915</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fdmed.2026.1729825</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Review</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Accuracy of artificial intelligence applications in periodontics: a thematic narrative review</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes"><name><surname>Azhari</surname><given-names>Ady</given-names></name>
<xref ref-type="corresp" rid="cor1">&#x002A;</xref><uri xlink:href="https://loop.frontiersin.org/people/3244564/overview"/><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role></contrib>
</contrib-group>
<aff id="aff1"><institution>Department of Periodontology, Faculty of Dentistry, King Abdulaziz University</institution>, <city>Jeddah</city>, <country country="sa">Saudi Arabia</country></aff>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label><bold>Correspondence:</bold> Ady Azhari <email xlink:href="mailto:aazhari1@kau.edu.sa">aazhari1@kau.edu.sa</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-01-22"><day>22</day><month>01</month><year>2026</year></pub-date>
<pub-date publication-format="electronic" date-type="collection"><year>2026</year></pub-date>
<volume>7</volume><elocation-id>1729825</elocation-id>
<history>
<date date-type="received"><day>21</day><month>10</month><year>2025</year></date>
<date date-type="rev-recd"><day>04</day><month>12</month><year>2025</year></date>
<date date-type="accepted"><day>02</day><month>01</month><year>2026</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2026 Azhari.</copyright-statement>
<copyright-year>2026</copyright-year><copyright-holder>Azhari</copyright-holder><license><ali:license_ref start_date="2026-01-22">https://creativecommons.org/licenses/by/4.0/</ali:license_ref><license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p></license>
</permissions>
<abstract><sec><title>Background</title>
<p>Artificial intelligence (AI) has been increasingly applied to periodontal diagnostics across periapical, bitewing, and panoramic radiographs, cone-beam computed tomography (CBCT), and intraoral photographs. Recent multicenter, external-validation, and explainability-focused studies have advanced the field, yet variability in datasets, anatomical sites, reference standards, model architectures, and reporting practices introduces substantial heterogeneity. A structured synthesis of current evidence is therefore warranted.</p>
</sec><sec><title>Main text</title>
<p>This review synthesizes 35 studies published between 2019 and 2025 that evaluated AI applications in four diagnostic domains: detection of periodontal bone loss, measurement of alveolar bone levels, identification of furcation involvement, and detection of periapical lesions. Convolutional neural network (CNN)-based models using periapical radiographs achieved moderate-to-high diagnostic accuracy (0.82&#x2013;0.85) and areas under the curve (AUCs) above 0.88, comparable to clinician performance. Panoramic radiographs yielded lower sensitivity and specificity than CBCT, for which deep learning systems reached higher accuracy (up to 0.91) and superior volumetric assessment. Intraoral photographic analyses showed variable performance (0.46&#x2013;1.00), largely due to inconsistent imaging and reference standards. Emerging trends include hybrid segmentation&#x2013;classification architectures, transformer-based networks, and clinician-in-the-loop approaches. Determinants of performance encompass reference standard quality, dataset diversity, anatomical complexity, and adherence to the STARD-AI and TRIPOD-AI reporting frameworks.</p>
</sec><sec><title>Conclusions</title>
<p>AI demonstrates clinically relevant diagnostic accuracy in periodontal imaging, especially for measurement standardization and decision support. Although autonomous diagnosis remains premature, integrating explainable, externally validated AI systems within clinician-guided workflows supported by standardized reporting offers a practical route toward clinical translation.</p>
</sec>
</abstract>
<kwd-group>
<kwd>artificial intelligence</kwd>
<kwd>cone-beam computed tomography</kwd>
<kwd>diagnostic accuracy</kwd>
<kwd>intraoral imaging</kwd>
<kwd>machine learning</kwd>
<kwd>panoramic radiography</kwd>
<kwd>periodontal bone loss</kwd>
<kwd>periodontics</kwd>
</kwd-group><funding-group><funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement></funding-group><counts>
<fig-count count="0"/>
<table-count count="1"/><equation-count count="0"/><ref-count count="73"/><page-count count="9"/><word-count count="0"/></counts><custom-meta-group><custom-meta><meta-name>section-at-acceptance</meta-name><meta-value>Periodontics</meta-value></custom-meta></custom-meta-group>
</article-meta>
</front>
<body><sec id="s1" sec-type="intro"><label>1</label><title>Introduction</title>
<p>Periodontal diseases are among the most prevalent chronic inflammatory conditions globally, affecting billions of individuals and contributing substantially to the global burden of oral disease (<xref ref-type="bibr" rid="B1">1</xref>, <xref ref-type="bibr" rid="B2">2</xref>). Accurate and early diagnosis is essential to prevent progression and tooth loss. Conventional diagnostic approaches rely primarily on clinical probing and two-dimensional (2D) radiographic interpretation, both of which have recognized limitations. Clinical measurements are prone to examiner variability and may underestimate localized defects, while 2D radiographs provide only planar projections that can distort anatomical relationships and obscure bone morphology (<xref ref-type="bibr" rid="B3">3</xref>&#x2013;<xref ref-type="bibr" rid="B5">5</xref>).</p>
<p>Artificial intelligence (AI), particularly deep learning (DL) methods such as convolutional neural networks (CNNs), has emerged as a powerful adjunct to conventional diagnostic workflows in dentistry (<xref ref-type="bibr" rid="B6">6</xref>&#x2013;<xref ref-type="bibr" rid="B9">9</xref>). Over the last decade, AI applications have rapidly expanded across dental disciplines, including periodontology, endodontics, caries detection, and implant planning. Various imaging modalities have been explored, including periapical and bitewing radiographs, panoramic radiographs, cone-beam computed tomography (CBCT), and intraoral photographs (<xref ref-type="bibr" rid="B8">8</xref>, <xref ref-type="bibr" rid="B10">10</xref>&#x2013;<xref ref-type="bibr" rid="B16">16</xref>). Across these modalities, AI has been applied to critical diagnostic tasks, such as detecting periodontal bone loss (PBL), measuring alveolar bone levels, identifying furcation involvement, and detecting periapical lesions.</p>
<p>On periapical radiographs, large-scale diagnostic studies have shown that CNN-based classifiers can achieve accuracies between 0.82 and 0.85, with area under the curve (AUC) values above 0.88, often comparable to or exceeding average clinician performance (<xref ref-type="bibr" rid="B6">6</xref>, <xref ref-type="bibr" rid="B12">12</xref>, <xref ref-type="bibr" rid="B17">17</xref>). Systematic reviews and meta-analyses have confirmed this trend toward high diagnostic performance, and individual AI imaging studies have reported specificities as high as 0.98, underscoring the promise of AI-assisted periodontal diagnosis (<xref ref-type="bibr" rid="B15">15</xref>, <xref ref-type="bibr" rid="B18">18</xref>, <xref ref-type="bibr" rid="B19">19</xref>).</p>
<p>On panoramic radiographs, CNN-based models have been reported to achieve diagnostic accuracies exceeding 90&#x0025; with balanced sensitivity&#x2013;specificity profiles, in some cases outperforming dentists, whose accuracy ranges around 76&#x0025;&#x2013;78&#x0025; (<xref ref-type="bibr" rid="B20">20</xref>). Despite such strong internal results, however, segmentation models such as DeepLabv3&#x002B; have demonstrated notable declines in performance when evaluated on external data, reflecting a pattern widely observed in dental AI systems. For example, Chau et al. (<xref ref-type="bibr" rid="B21">21</xref>) reported high internal accuracy using a CNN-based photographic system for gingivitis detection, yet external validation of the same mHealth tool in Chau et al. (<xref ref-type="bibr" rid="B22">22</xref>) showed a substantial drop in specificity. Such discrepancies highlight the impact of dataset shift, arising from variations in imaging devices, acquisition protocols, and software versions; device heterogeneity across clinical sites; and differences in lighting conditions, angulation, and overall image quality, particularly in real-world dental imaging. Collectively, these factors limit external generalizability and underscore the need for rigorous multicenter external validation before AI systems can be reliably deployed across diverse populations and imaging environments.</p>
<p>Recent advances have extended to CBCT imaging, enabling volumetric assessment and detection of complex anatomical changes. Deep learning&#x2013;based segmentation models have demonstrated promising performance, achieving accuracies around 0.91 and AUC values up to 0.96 for alveolar bone loss detection, with moderate performance in furcation defect identification (AUC&#x2009;&#x2248;&#x2009;0.81). These findings highlight the potential of AI to enhance diagnostic precision in three-dimensional periodontal imaging (<xref ref-type="bibr" rid="B13">13</xref>, <xref ref-type="bibr" rid="B23">23</xref>). Intraoral photographic applications, increasingly relevant for screening and teledentistry, have shown variable diagnostic performance, with some studies reporting classification accuracies as low as &#x223C;0.46 and others approaching 1.00, although these extremes are not consistent across studies, owing to differences in protocol, device type, and reference standard (<xref ref-type="bibr" rid="B14">14</xref>, <xref ref-type="bibr" rid="B24">24</xref>, <xref ref-type="bibr" rid="B25">25</xref>).</p>
<p>Methodological developments have also matured. Frameworks such as STARD-AI and TRIPOD-AI provide standardized guidance for transparent reporting of AI-based diagnostic accuracy studies (<xref ref-type="bibr" rid="B25">25</xref>&#x2013;<xref ref-type="bibr" rid="B28">28</xref>). Studies increasingly incorporate external validation, multicenter datasets, and explainable AI techniques, aligning dental AI research with broader medical imaging standards (<xref ref-type="bibr" rid="B29">29</xref>). Recent dental CBCT/CT studies, for example tooth segmentation meta-analyses, mandibular canal and root resorption quantification, and gender/age estimation with attention-based CBCT models, demonstrate this trend and show excellent performance on external and held-out test datasets (<xref ref-type="bibr" rid="B30">30</xref>&#x2013;<xref ref-type="bibr" rid="B34">34</xref>). Several recent overviews have synthesized AI performance across dental fields and highlighted the importance of structured reporting, external validation, and clinician engagement for successful clinical translation: Jundaeng et al. (<xref ref-type="bibr" rid="B35">35</xref>) review trends in AI for periodontitis diagnosis, and Tuygunov et al. (<xref ref-type="bibr" rid="B36">36</xref>) provide a comprehensive overview of challenges and enablers in integrating AI into dental practice.</p>
<p>Given this rapidly expanding and heterogeneous evidence base, a thematic narrative review is warranted to synthesize current literature on AI diagnostic accuracy in periodontics. This review groups studies thematically by imaging modality, diagnostic task, and AI architecture, while examining determinants of performance and implications for clinical integration. This study was conducted as a thematic narrative review rather than a systematic review, and therefore did not follow a formal PRISMA-based screening or quantitative meta-analytic protocol.</p>
</sec>
<sec id="s2"><label>2</label><title>Main text&#x2014;thematic narrative review</title>
<sec id="s2a"><label>2.1</label><title>Periapical and bitewing radiographs</title>
<p>Periapical and bitewing radiographs have been the most widely used modalities for AI applications in periodontal diagnostics, given their routine use in clinical practice and relatively high spatial resolution. CNN-based models have achieved diagnostic accuracies between 0.82 and 0.85 with AUC values above 0.88, comparable to average clinician performance (<xref ref-type="bibr" rid="B12">12</xref>, <xref ref-type="bibr" rid="B15">15</xref>). The key characteristics and diagnostic performance of the included studies are summarized in <xref ref-type="table" rid="T1">Table 1</xref>. Similar performance trends have been observed on panoramic radiographs (<xref ref-type="bibr" rid="B19">19</xref>). Subsequent studies have leveraged larger multicenter datasets, advanced segmentation&#x2013;classification pipelines, and external validation strategies to enhance generalizability (<xref ref-type="bibr" rid="B37">37</xref>, <xref ref-type="bibr" rid="B38">38</xref>). For example, Yadalam et al. (<xref ref-type="bibr" rid="B38">38</xref>) conducted a multicenter external validation of deep CNNs on periapical radiographs for alveolar bone loss detection, reporting sensitivities above 0.90 and consistent performance across centers.</p>
<table-wrap id="T1" position="float"><label>Table&#x00A0;1</label>
<caption><p>Summary of the 35 synthesized studies on AI diagnostic accuracy in periodontics (2019&#x2013;2025).</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">No.</th>
<th valign="top" align="center">Author(s), Year</th>
<th valign="top" align="center">Imaging modality</th>
<th valign="top" align="center">Diagnostic task/focus</th>
<th valign="top" align="center">AI model/approach</th>
<th valign="top" align="center">Key findings (accuracy/AUC)</th>
<th valign="top" align="center">Ref.</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">1</td>
<td valign="top" align="left">AlGhaihab et al. (2025)</td>
<td valign="top" align="left">Periapical/Bitewing</td>
<td valign="top" align="left">Alveolar bone loss detection</td>
<td valign="top" align="left">Deep CNN</td>
<td valign="top" align="left">Accuracy &#x003E;0.85; strong correlation with manual measures</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B10">10</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">2</td>
<td valign="top" align="left">&#x00C7;elik et al. (2023)</td>
<td valign="top" align="left">Panoramic</td>
<td valign="top" align="left">Periapical lesion detection</td>
<td valign="top" align="left">CNN</td>
<td valign="top" align="left">Reliable lesion detection vs. clinicians</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B11">11</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">3</td>
<td valign="top" align="left">Hoss et al. (2023)</td>
<td valign="top" align="left">Periapical</td>
<td valign="top" align="left">Periodontal bone loss detection</td>
<td valign="top" align="left">Multiple CNNs</td>
<td valign="top" align="left">AUC &#x003E;0.88; comparable to dentists</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B12">12</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">4</td>
<td valign="top" align="left">Tariq et al. (2023)</td>
<td valign="top" align="left">Systematic Review</td>
<td valign="top" align="left">Radiographic PBL detection</td>
<td valign="top" align="left">Meta-analysis</td>
<td valign="top" align="left">Pooled specificity 0.91&#x2013;0.98</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B15">15</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">5</td>
<td valign="top" align="left">Iacob et al. (2025)</td>
<td valign="top" align="left">Systematic Review</td>
<td valign="top" align="left">PBL in 2D radiographs</td>
<td valign="top" align="left">Meta-analysis</td>
<td valign="top" align="left">Consistent high accuracy across CNNs</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B18">18</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">6</td>
<td valign="top" align="left">Kabir et al. (2021)</td>
<td valign="top" align="left">Periapical</td>
<td valign="top" align="left">Periodontitis stage grading</td>
<td valign="top" align="left">HYNETS hybrid CNN</td>
<td valign="top" align="left">AUC &#x2248;0.97</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B39">39</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">7</td>
<td valign="top" align="left">Chang et al. (2020)</td>
<td valign="top" align="left">Periapical</td>
<td valign="top" align="left">Bone loss and staging</td>
<td valign="top" align="left">Hybrid DL model</td>
<td valign="top" align="left">Accuracy 0.94; improved reproducibility</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B62">62</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">8</td>
<td valign="top" align="left">Muhammed Sunnetci et al. (2022)</td>
<td valign="top" align="left">Periapical</td>
<td valign="top" align="left">Bone loss classification</td>
<td valign="top" align="left">Hybrid DL&#x2009;&#x002B;&#x2009;ML</td>
<td valign="top" align="left">Improved interpretability</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B63">63</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">9</td>
<td valign="top" align="left">Dujic et al. (2023)</td>
<td valign="top" align="left">Periapical</td>
<td valign="top" align="left">PBL detection</td>
<td valign="top" align="left">Vision Transformer</td>
<td valign="top" align="left">High accuracy, good generalization</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B64">64</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">10</td>
<td valign="top" align="left">Mei et al. (2025)</td>
<td valign="top" align="left">Periapical</td>
<td valign="top" align="left">Disease diagnosis</td>
<td valign="top" align="left">Clinical knowledge-guided hybrid</td>
<td valign="top" align="left">Enhanced accuracy vs. baseline CNN</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B37">37</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">11</td>
<td valign="top" align="left">Widyaningrum et al. (2025)</td>
<td valign="top" align="left">Panoramic</td>
<td valign="top" align="left">Periodontitis detection/staging</td>
<td valign="top" align="left">Two-stage CNN</td>
<td valign="top" align="left">Accuracy 0.91&#x2013;0.94</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B40">40</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">12</td>
<td valign="top" align="left">Chen et al. (2024)</td>
<td valign="top" align="left">Periapical</td>
<td valign="top" align="left">Early bone loss diagnosis</td>
<td valign="top" align="left">CNN</td>
<td valign="top" align="left">Accuracy 0.90; supports early detection</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B41">41</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">13</td>
<td valign="top" align="left">Krois et al. (2021)</td>
<td valign="top" align="left">Cross-modal</td>
<td valign="top" align="left">Dental image analysis</td>
<td valign="top" align="left">CNN generalizability</td>
<td valign="top" align="left">Model transferability validated</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B42">42</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">14</td>
<td valign="top" align="left">Kurt-Bayrakdar et al. (2025)</td>
<td valign="top" align="left">CBCT</td>
<td valign="top" align="left">Bone loss pattern detection</td>
<td valign="top" align="left">Deep CNN</td>
<td valign="top" align="left">Accuracy 0.91; volumetric precision</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B13">13</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">15</td>
<td valign="top" align="left">Xue et al. (2024)</td>
<td valign="top" align="left">Panoramic</td>
<td valign="top" align="left">Bone loss &#x0026; periodontitis stage</td>
<td valign="top" align="left">DL classifier</td>
<td valign="top" align="left">Accuracy &#x003E;0.84; AUC 0.92</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B16">16</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">16</td>
<td valign="top" align="left">Chatzopoulos et al. (2025)</td>
<td valign="top" align="left">Panoramic</td>
<td valign="top" align="left">Furcation defect classification</td>
<td valign="top" align="left">Systematic review</td>
<td valign="top" align="left">Highlights AI potential in furcation analysis</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B20">20</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">17</td>
<td valign="top" align="left">Shetty et al. (2024)</td>
<td valign="top" align="left">CBCT</td>
<td valign="top" align="left">Furcation involvement</td>
<td valign="top" align="left">CNN</td>
<td valign="top" align="left">Accuracy 0.91; AUC 0.98</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B45">45</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">18</td>
<td valign="top" align="left">Palkovics et al. (2025)</td>
<td valign="top" align="left">CBCT</td>
<td valign="top" align="left">Periodontal bone segmentation</td>
<td valign="top" align="left">DL segmentation</td>
<td valign="top" align="left">Accurate 3D bone contour detection</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B50">50</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">19</td>
<td valign="top" align="left">Pan et al. (2025)</td>
<td valign="top" align="left">CBCT</td>
<td valign="top" align="left">Mandibular canal localization</td>
<td valign="top" align="left">CNN</td>
<td valign="top" align="left">Robust multicenter generalization</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B51">51</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">20</td>
<td valign="top" align="left">Rashid &#x0026; Gaghor (2025)</td>
<td valign="top" align="left">CBCT</td>
<td valign="top" align="left">Bone quantity assessment</td>
<td valign="top" align="left">DL quantification</td>
<td valign="top" align="left">Accurate cross-sectional measurements</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B57">57</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">21</td>
<td valign="top" align="left">Widiasri et al. (2023)</td>
<td valign="top" align="left">CBCT</td>
<td valign="top" align="left">Bone &#x0026; canal segmentation</td>
<td valign="top" align="left">U-Net</td>
<td valign="top" align="left">High Dice score; reliable segmentation</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B58">58</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">22</td>
<td valign="top" align="left">Naufal et al. (2024)</td>
<td valign="top" align="left">CBCT</td>
<td valign="top" align="left">3D reconstruction</td>
<td valign="top" align="left">YOLOv8 segmentation</td>
<td valign="top" align="left">Sub-mm accuracy vs. reference</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B56">56</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">23</td>
<td valign="top" align="left">Zhang et al. (2025)</td>
<td valign="top" align="left">Panoramic</td>
<td valign="top" align="left">Furcation classification</td>
<td valign="top" align="left">Vision Transformer</td>
<td valign="top" align="left">AUC &#x003E;0.95</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B59">59</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">24</td>
<td valign="top" align="left">Zhou et al. (2025)</td>
<td valign="top" align="left">CBCT</td>
<td valign="top" align="left">Tooth instance segmentation</td>
<td valign="top" align="left">Open DL framework</td>
<td valign="top" align="left">Improved cross-task transferability</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B47">47</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">25</td>
<td valign="top" align="left">Chen et al. (2023)</td>
<td valign="top" align="left">CBCT</td>
<td valign="top" align="left">Dental segmentation</td>
<td valign="top" align="left">CTA-UNet (CNN&#x2013;Transformer)</td>
<td valign="top" align="left">Outperformed U-Net baseline</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B48">48</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">26</td>
<td valign="top" align="left">Zhao et al. (2025)</td>
<td valign="top" align="left">CBCT</td>
<td valign="top" align="left">Implant classification/segmentation</td>
<td valign="top" align="left">Multi-task learning</td>
<td valign="top" align="left">AUC 0.94; effective joint learning</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B49">49</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">27</td>
<td valign="top" align="left">Liu et al. (2023)</td>
<td valign="top" align="left">CBCT&#x2009;&#x002B;&#x2009;Intraoral</td>
<td valign="top" align="left">Multimodal 3D fusion</td>
<td valign="top" align="left">DL fusion</td>
<td valign="top" align="left">Superior structural reconstruction</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B46">46</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">28</td>
<td valign="top" align="left">Mao et al. (2025)</td>
<td valign="top" align="left">Intraoral Photo</td>
<td valign="top" align="left">Periodontitis detection</td>
<td valign="top" align="left">Systematic review</td>
<td valign="top" align="left">AUC 0.80&#x2013;0.93; variable standards</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B14">14</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">29</td>
<td valign="top" align="left">Tao et al. (2025)</td>
<td valign="top" align="left">Intraoral Photo</td>
<td valign="top" align="left">Periodontitis screening</td>
<td valign="top" align="left">DL photo processing</td>
<td valign="top" align="left">AUC 0.93; high sensitivity</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B53">53</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">30</td>
<td valign="top" align="left">Wen et al. (2024)</td>
<td valign="top" align="left">Intraoral Photo</td>
<td valign="top" align="left">Gingival inflammation grading</td>
<td valign="top" align="left">CNN with removal strategy</td>
<td valign="top" align="left">Accuracy 0.84&#x2013;0.88</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B54">54</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">31</td>
<td valign="top" align="left">Felsch et al. (2023)</td>
<td valign="top" align="left">Intraoral Photo</td>
<td valign="top" align="left">Caries/hypomineralization detection</td>
<td valign="top" align="left">Vision Transformer</td>
<td valign="top" align="left">AUROC 0.93</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B52">52</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">32</td>
<td valign="top" align="left">Yadalam et al. (2025)</td>
<td valign="top" align="left">Periapical</td>
<td valign="top" align="left">External validation of bone loss classifier</td>
<td valign="top" align="left">Dual-embedding few-shot CNN</td>
<td valign="top" align="left">Sensitivity &#x003E;0.90</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B38">38</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">33</td>
<td valign="top" align="left">Kot et al. (2025)</td>
<td valign="top" align="left">CBCT/CT</td>
<td valign="top" align="left">Tooth segmentation meta-analysis</td>
<td valign="top" align="left">Systematic meta-analysis</td>
<td valign="top" align="left">Pooled Dice &#x2248;0.92</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B32">32</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">34</td>
<td valign="top" align="left">Baena-de la Iglesia et al. (2025)</td>
<td valign="top" align="left">CBCT</td>
<td valign="top" align="left">External root resorption quantification</td>
<td valign="top" align="left">AI-aided 3D analysis</td>
<td valign="top" align="left">High volumetric agreement</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B30">30</xref>)</td>
</tr>
<tr>
<td valign="top" align="left">35</td>
<td valign="top" align="left">da Andrade-Bortoletto et al. (2025)</td>
<td valign="top" align="left">CBCT</td>
<td valign="top" align="left">Mandibular canal segmentation</td>
<td valign="top" align="left">Comparative validation</td>
<td valign="top" align="left">Accuracy &#x003E;0.90</td>
<td valign="top" align="left">(<xref ref-type="bibr" rid="B31">31</xref>)</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Hybrid deep learning models that first segment alveolar bone or tooth structures and then classify bone loss severity or disease stage have increasingly outperformed classification-only approaches, especially in quantification tasks. For example, HYNETS (Kabir et al.) reports AUC&#x2009;&#x2248;&#x2009;0.97 for stage grading from periapical radiographs using entangled segmentation and classification modules (<xref ref-type="bibr" rid="B39">39</xref>). More recently, Mei et al. (<xref ref-type="bibr" rid="B37">37</xref>) introduced a clinical knowledge&#x2013;guided hybrid classification network combining localization and classification branches to improve periodontal diagnosis, and Widyaningrum et al. (<xref ref-type="bibr" rid="B40">40</xref>) demonstrated a two-stage pipeline (detect&#x2009;&#x2192;&#x2009;classify) for periodontal bone loss detection and staging. Explainability tools such as Grad-CAM and clinician-in-the-loop interfaces have improved interpretability and strengthened practitioners&#x0027; trust in AI-assisted periodontal diagnostics (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B8">8</xref>, <xref ref-type="bibr" rid="B23">23</xref>, <xref ref-type="bibr" rid="B36">36</xref>). A recent systematic review found that AI-based PBL detection on periapical radiographs consistently achieved high specificity (0.91&#x2013;0.98) and improved measurement reproducibility compared with manual assessment (<xref ref-type="bibr" rid="B18">18</xref>, <xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B42">42</xref>).</p>
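<p>To make the segment-then-classify pattern concrete, the following minimal sketch pairs a U-Net segmenter with a CNN classifier conditioned on the predicted mask. It is an illustrative assumption written in PyTorch with the third-party segmentation_models_pytorch package, not the implementation of HYNETS or any other cited system; the input size, backbone choices, and four-stage output are placeholders.</p>
<code language="python">
# Illustrative two-stage "segment-then-classify" pipeline for periodontal
# bone loss staging. Hypothetical sketch only; not the architecture of any
# study cited in this review.
import torch
import torch.nn as nn
import segmentation_models_pytorch as smp  # assumed third-party dependency
from torchvision.models import resnet34

class SegmentThenClassify(nn.Module):
    def __init__(self, num_stages=4):
        super().__init__()
        # Stage 1: U-Net predicts a bone/tooth mask from the radiograph.
        self.segmenter = smp.Unet("resnet18", encoder_weights=None,
                                  in_channels=1, classes=1)
        # Stage 2: a CNN grades severity from image + mask (2 channels).
        self.classifier = resnet34(num_classes=num_stages)
        self.classifier.conv1 = nn.Conv2d(
            2, 64, kernel_size=7, stride=2, padding=3, bias=False)

    def forward(self, x):                         # x: (batch, 1, H, W)
        mask = torch.sigmoid(self.segmenter(x))   # soft anatomical mask
        fused = torch.cat([x, mask], dim=1)       # condition on anatomy
        return self.classifier(fused)             # periodontitis-stage logits

model = SegmentThenClassify()
logits = model(torch.randn(2, 1, 256, 256))       # two dummy periapicals
print(logits.shape)                               # torch.Size([2, 4])
</code>
<p>Tying the severity decision to an explicitly delineated bone contour, rather than to raw pixels, is what gives such hybrids their reported advantage in quantification tasks.</p>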
</sec>
<sec id="s2b"><label>2.2</label><title>Panoramic radiographs and CBCT</title>
<p>Panoramic radiographs offer wide coverage but suffer from inherent distortions and overlapping structures that challenge AI detection accuracy. CNN models trained on panoramic images have achieved accuracies around 0.80&#x2013;0.84, with balanced sensitivity and specificity (<xref ref-type="bibr" rid="B13">13</xref>, <xref ref-type="bibr" rid="B16">16</xref>, <xref ref-type="bibr" rid="B20">20</xref>). Large-dataset studies (e.g., Kurt-Bayrakdar et al. (<xref ref-type="bibr" rid="B13">13</xref>)) show that deep learning models can approach or match clinician diagnostic performance for bone loss classification on panoramic radiographs, although performance drops for certain defect types such as vertical bone loss, in posterior regions, and where anatomical overlap is substantial (<xref ref-type="bibr" rid="B13">13</xref>, <xref ref-type="bibr" rid="B16">16</xref>, <xref ref-type="bibr" rid="B43">43</xref>).</p>
<p>CBCT provides volumetric information that allows AI models to evaluate complex periodontal structures in three dimensions. Recent CBCT-based investigations, including high-resolution CBCT texture-analysis work (<xref ref-type="bibr" rid="B71">71</xref>), have demonstrated that deep CNNs and advanced segmentation architectures can achieve high diagnostic performance across selected periodontal tasks. For instance, a ResNet101V2 model achieved approximately 0.91 test accuracy and an AUC of 0.98 for detecting furcation involvement on axial CBCT slices, while segmentation models have reported AUC values approaching 0.96 for total alveolar bone loss detection. Although performance varies across tasks and datasets, these findings highlight the strong potential of AI for volumetric periodontal assessment and underscore the need for larger, multicenter validation studies (<xref ref-type="bibr" rid="B13">13</xref>, <xref ref-type="bibr" rid="B44">44</xref>, <xref ref-type="bibr" rid="B45">45</xref>). Recent work has explored multimodal fusion, combining CBCT with other imaging sources. For example, a deep learning fusion model integrating CBCT and intraoral mesh scans achieved superior accuracy for structural reconstruction, and frameworks combining CBCT and panoramic registration have shown improved detection of furcation and alveolar bone patterns compared with single modalities (<xref ref-type="bibr" rid="B46">46</xref>, <xref ref-type="bibr" rid="B47">47</xref>).</p>
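<p>Segmentation quality in these CBCT pipelines is typically scored with overlap metrics such as the Dice similarity coefficient (e.g., the pooled Dice of approximately 0.92 reported for tooth segmentation in <xref ref-type="table" rid="T1">Table 1</xref>). The following minimal sketch shows how the coefficient is computed on binary masks; the arrays are synthetic toy examples, not data from any cited study.</p>
<code language="python">
# Dice similarity coefficient on binary masks, the standard overlap metric
# behind the pooled segmentation scores cited above. Toy data only.
import numpy as np

def dice_coefficient(pred, truth, eps=1e-7):
    """Dice = 2 * |A intersect B| / (|A| + |B|)."""
    pred = pred.astype(bool)
    truth = truth.astype(bool)
    intersection = np.logical_and(pred, truth).sum()
    return (2.0 * intersection + eps) / (pred.sum() + truth.sum() + eps)

# Hypothetical 8 x 8 "alveolar bone" masks with partial overlap.
truth = np.zeros((8, 8), dtype=np.uint8)
truth[2:6, 2:6] = 1                      # 16 ground-truth pixels
pred = np.zeros((8, 8), dtype=np.uint8)
pred[3:7, 3:7] = 1                       # 16 predicted pixels, 9 shared
print(dice_coefficient(pred, truth))     # 2*9/(16+16) = 0.5625
</code>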
<p>Recent CNN&#x2013;transformer hybrid and multi-task frameworks have improved CBCT performance by integrating segmentation, classification, and measurement into unified pipelines; examples include CTA-UNet and Swin/UNetR-based systems that leverage global attention for improved tooth and bone segmentation and downstream classification (<xref ref-type="bibr" rid="B47">47</xref>&#x2013;<xref ref-type="bibr" rid="B49">49</xref>). External validation studies on multicenter CBCT datasets have demonstrated robust generalizability for anatomical localization and segmentation tasks (e.g., mandibular canal localization and periodontal defect segmentation), supporting cross-site deployment after appropriate validation (<xref ref-type="bibr" rid="B45">45</xref>, <xref ref-type="bibr" rid="B50">50</xref>, <xref ref-type="bibr" rid="B51">51</xref>).</p>
</sec>
<sec id="s2c"><label>2.3</label><title>Intraoral photographs</title>
<p>Intraoral photographic applications are gaining traction, especially in screening and teledentistry settings where radiographic facilities may be limited. Reported diagnostic accuracies vary widely (&#x2248;0.46&#x2013;1.00), primarily because of inconsistent imaging protocols, variable lighting conditions, and heterogeneous reference standards (<xref ref-type="bibr" rid="B14">14</xref>). Recent studies using standardized imaging workflows and modern CNN, transformer, or hybrid CNN&#x2013;transformer architectures have improved performance in controlled settings: a ResNet50-based multi-instance model reached an AUROC of 0.93 on internal and external tests for stage II&#x2013;IV periodontitis, and DenseNet/CNN models have reported AUROCs around 0.80&#x2013;0.84 for gingival inflammation grading. These results indicate that protocol standardization combined with modern architectures can substantially increase photographic diagnostic accuracy (<xref ref-type="bibr" rid="B52">52</xref>&#x2013;<xref ref-type="bibr" rid="B54">54</xref>).</p>
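<p>As an illustration of how patient-level screening from several photographs can be assembled, the sketch below max-pools per-image CNN logits across a patient&#x0027;s photo set, a simple multiple-instance formulation. The ResNet50 backbone and the pooling rule are assumptions for exposition, not the design of the specific model cited above.</p>
<code language="python">
# Minimal multiple-instance classifier for intraoral photographs: each
# patient contributes a bag of images, and per-image logits are max-pooled
# into one patient-level score. Illustrative assumptions throughout.
import torch
import torch.nn as nn
from torchvision.models import resnet50

class BagOfPhotosClassifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.backbone = resnet50(num_classes=1)   # per-image logit

    def forward(self, bag):                # bag: (n_images, 3, H, W)
        scores = self.backbone(bag)        # (n_images, 1)
        return scores.max(dim=0).values    # patient-level logit

model = BagOfPhotosClassifier().eval()
photos = torch.randn(5, 3, 224, 224)       # five views of one patient
with torch.no_grad():
    prob = torch.sigmoid(model(photos))    # screening probability
print(prob)
</code>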
<p>A systematic review by Mao et al. (<xref ref-type="bibr" rid="B14">14</xref>) highlighted the promise of AI for photographic detection of periodontal disease but emphasized the urgent need for standardized acquisition protocols and multicenter datasets (<xref ref-type="bibr" rid="B14">14</xref>). Clinician-in-the-loop systems are particularly relevant here, allowing AI to serve as an assistive tool for remote triaging rather than as an autonomous diagnostic system.</p>
</sec>
<sec id="s2d"><label>2.4</label><title>Themes by diagnostic task</title>
<p>Grouping studies by diagnostic task provides insight into how AI performance varies according to the clinical endpoint:
<list list-type="simple">
<list-item>
<p>Periodontal Bone Loss Detection: Most studies focus here, particularly on periapical and panoramic radiographs. CNNs and hybrid segmentation-classification architectures achieve high specificity (&#x003E;0.90) and good sensitivity, especially for moderate-to-severe defects (<xref ref-type="bibr" rid="B12">12</xref>, <xref ref-type="bibr" rid="B19">19</xref>, <xref ref-type="bibr" rid="B40">40</xref>, <xref ref-type="bibr" rid="B55">55</xref>).</p></list-item>
<list-item>
<p>Alveolar Bone Measurement: Multi-task deep learning models using YOLOv8 and ResNet backbones have demonstrated sub-millimeter error in alveolar crest measurement compared to CBCT gold standards (<xref ref-type="bibr" rid="B37">37</xref>, <xref ref-type="bibr" rid="B56">56</xref>&#x2013;<xref ref-type="bibr" rid="B58">58</xref>).</p></list-item>
<list-item>
<p>Furcation Involvement: CBCT-based CNNs and multimodal fusion approaches outperform panoramic models, achieving AUC values above 0.95 (<xref ref-type="bibr" rid="B13">13</xref>, <xref ref-type="bibr" rid="B45">45</xref>, <xref ref-type="bibr" rid="B59">59</xref>).</p></list-item>
<list-item>
<p>Periapical Lesion Detection: Transformer-based networks validated on multinational datasets showed superior sensitivity and external generalizability compared to baseline CNNs (<xref ref-type="bibr" rid="B17">17</xref>, <xref ref-type="bibr" rid="B60">60</xref>, <xref ref-type="bibr" rid="B61">61</xref>).</p></list-item>
</list>This task-based perspective underscores that AI performance is highest for volumetric and measurement tasks using CBCT, while 2D modalities remain highly useful for screening and routine clinical support.</p>
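<p>Concretely, the sensitivity, specificity, and AUC figures quoted throughout this section are computed from per-site (or per-image) model outputs as in the short sketch below; the labels and probabilities shown are synthetic toy values, not data from any cited study.</p>
<code language="python">
# How the headline diagnostic metrics are derived from model outputs.
# Synthetic labels and probabilities; threshold choice varies by study.
import numpy as np
from sklearn.metrics import roc_auc_score, confusion_matrix

y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 0])        # 1 = bone loss
y_prob = np.array([0.1, 0.3, 0.2, 0.6, 0.8, 0.7, 0.9,
                   0.4, 0.85, 0.15])                      # model probabilities
y_pred = (y_prob >= 0.5).astype(int)                      # operating threshold

tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
sensitivity = tp / (tp + fn)          # true-positive rate
specificity = tn / (tn + fp)          # true-negative rate
auc = roc_auc_score(y_true, y_prob)   # threshold-independent ranking quality
print(f"sens={sensitivity:.2f} spec={specificity:.2f} AUC={auc:.2f}")
# sens=0.80 spec=0.80 AUC=0.96
</code>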
</sec>
<sec id="s2e"><label>2.5</label><title>Themes by AI architecture</title>
<p>Early studies relied mainly on standard CNN architectures (e.g., U-Net, ResNet) for either segmentation or classification. Over time, three major trends have emerged:
<list list-type="simple">
<list-item>
<p>Segmentation&#x2013;Classification Hybrids: Hybrid models combining segmentation and classification have improved bone contour localization and bone loss quantification, particularly in periapical radiographs. Notable examples include Chang et al. (<xref ref-type="bibr" rid="B62">62</xref>), Sunnetci et al. (<xref ref-type="bibr" rid="B63">63</xref>), Dujic et al. (<xref ref-type="bibr" rid="B64">64</xref>), and Widyaningrum et al. (<xref ref-type="bibr" rid="B40">40</xref>), all of which applied CNN-based hybrids for periodontal bone loss assessment, building on the U-Net architecture (<xref ref-type="bibr" rid="B29">29</xref>) as a foundational segmentation framework.</p></list-item>
<list-item>
<p>Transformer-Based Models: Enhanced contextual understanding, particularly in panoramic and CBCT tasks, improving AUC by 3&#x0025;&#x2013;5&#x0025; over CNN baselines (<xref ref-type="bibr" rid="B65">65</xref>&#x2013;<xref ref-type="bibr" rid="B67">67</xref>).</p></list-item>
<list-item>
<p>Clinician-in-the-Loop &#x0026; Explainable AI: Integration of Grad-CAM, attention maps, and uncertainty quantification to increase interpretability and user trust (<xref ref-type="bibr" rid="B68">68</xref>&#x2013;<xref ref-type="bibr" rid="B70">70</xref>); a minimal Grad-CAM sketch follows this list.</p></list-item>
</list>Recent methodological frameworks such as TRIPOD-AI and STARD-AI have influenced model development and reporting, encouraging external validation, clear reference standards, and transparent evaluation metrics (<xref ref-type="bibr" rid="B28">28</xref>, <xref ref-type="bibr" rid="B71">71</xref>).</p>
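<p>To illustrate the first of these explainability tools, the sketch below implements bare-bones Grad-CAM in PyTorch using forward and backward hooks. The classifier, target layer, and input are stand-in assumptions rather than any cited system; in practice the resulting heatmap would be overlaid on the radiograph for clinician review.</p>
<code language="python">
# Bare-bones Grad-CAM on a stand-in CNN classifier. Illustrative only.
import torch
import torch.nn.functional as F
from torchvision.models import resnet34

model = resnet34(num_classes=2).eval()
acts, grads = {}, {}
layer = model.layer4  # last convolutional block

layer.register_forward_hook(lambda m, i, o: acts.update(v=o))
layer.register_full_backward_hook(lambda m, gi, go: grads.update(v=go[0]))

x = torch.randn(1, 3, 224, 224, requires_grad=True)  # stand-in radiograph
score = model(x)[0, 1]          # logit for the "disease" class
score.backward()

w = grads["v"].mean(dim=(2, 3), keepdim=True)         # channel importance
cam = F.relu((w * acts["v"]).sum(dim=1, keepdim=True))
cam = F.interpolate(cam, size=x.shape[2:], mode="bilinear")
heatmap = cam / (cam.max() + 1e-8)  # normalized overlay for the clinician
</code>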
</sec>
</sec>
<sec id="s3" sec-type="discussion"><label>3</label><title>Discussion</title>
<p>Four determinants of diagnostic performance emerge from this synthesis:</p>
<p>Imaging Modality and Anatomical Complexity: CBCT provides volumetric information that enables superior AI performance in furcation detection and alveolar bone measurement, with accuracies up to 0.91 and AUC values above 0.95 (<xref ref-type="bibr" rid="B37">37</xref>). Panoramic and intraoral photographs, by contrast, are more prone to distortion and variability, resulting in wider performance ranges (<xref ref-type="bibr" rid="B14">14</xref>, <xref ref-type="bibr" rid="B36">36</xref>).</p>
<p>Dataset Diversity and External Validation: Earlier single-center studies often reported high internal accuracies that did not generalize when evaluated on external datasets. Recent validation work has reinforced this limitation. For example, Hadzic et al. (<xref ref-type="bibr" rid="B60">60</xref>) demonstrated that a periapical lesion detection CNN trained on one dataset showed reduced performance when tested on an independent, clinically representative CBCT dataset, underscoring the impact of real-world variability. More recent multicenter and cross-architecture investigations have emphasized similar issues. The comparative study by Schneider et al. (<xref ref-type="bibr" rid="B73">73</xref>), evaluating CNNs, transformers, and hybrid models across multiple institutions, showed that diagnostic accuracy and model calibration varied substantially when models were applied across heterogeneous datasets. Likewise, Yadalam et al. (<xref ref-type="bibr" rid="B38">38</xref>) highlighted the sensitivity of AI systems to dataset diversity, noting that their few-shot learning framework for bone loss classification on OPG data remained highly dependent on training-domain characteristics and would require rigorous external validation for broader deployment. Collectively, these findings demonstrate that population differences, device heterogeneity, and broader domain-shift effects can substantially degrade performance, reinforcing the need for robust, multi-institutional external validation to ensure real-world applicability.</p>
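<p>In practice, the internal-versus-external comparison described above reduces to scoring one frozen model on two cohorts and reporting the AUC gap. The sketch below assumes generic PyTorch-style data loaders yielding (image, label) batches and a model emitting one logit per image; it is illustrative, not any cited study&#x0027;s evaluation code.</p>
<code language="python">
# Quantifying dataset shift: the same frozen model is scored on an internal
# held-out set and on an external cohort; the AUC drop is the shift signal.
# Loaders and model are hypothetical placeholders.
import torch
from sklearn.metrics import roc_auc_score

@torch.no_grad()
def evaluate(model, loader, device="cpu"):
    model.eval()
    labels, probs = [], []
    for images, y in loader:               # any (image, label) DataLoader
        p = torch.sigmoid(model(images.to(device))).squeeze(1)
        probs.extend(p.cpu().tolist())
        labels.extend(y.tolist())
    return roc_auc_score(labels, probs)

# internal_loader / external_loader: same-center held-out vs. other-center
# auc_int = evaluate(model, internal_loader)
# auc_ext = evaluate(model, external_loader)
# print(f"internal AUC {auc_int:.2f} vs external AUC {auc_ext:.2f}")
</code>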
<p>Reference Standards: Studies using CBCT or clinically validated measurements as reference standards consistently outperform those relying on single-expert or 2D radiographic assessments. The choice of gold standard directly impacts reported sensitivity and specificity, especially for alveolar bone measurements and furcation classification (<xref ref-type="bibr" rid="B18">18</xref>, <xref ref-type="bibr" rid="B37">37</xref>, <xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B42">42</xref>).</p>
<p>Methodological Rigor and Reporting Quality: Adherence to frameworks such as STARD-AI and TRIPOD-AI has increased over the past two years, leading to more transparent study designs and reproducible results (<xref ref-type="bibr" rid="B26">26</xref>, <xref ref-type="bibr" rid="B72">72</xref>). Explainability features and uncertainty quantification also play a role in clinician acceptance (<xref ref-type="bibr" rid="B23">23</xref>, <xref ref-type="bibr" rid="B36">36</xref>).</p>
<sec id="s3a"><label>3.1</label><title>Clinical implications</title>
<p>AI has significant potential to enhance periodontal diagnosis through standardized measurements, workflow efficiency, and decision support. High specificity in periapical radiograph applications may reduce inter-examiner variability and improve longitudinal monitoring in both specialist and general practice settings (<xref ref-type="bibr" rid="B12">12</xref>, <xref ref-type="bibr" rid="B18">18</xref>, <xref ref-type="bibr" rid="B19">19</xref>, <xref ref-type="bibr" rid="B38">38</xref>, <xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B42">42</xref>). CBCT-based AI systems, with their volumetric capabilities, are especially suited for complex diagnostic tasks such as furcation involvement and surgical planning, providing quantitative metrics that complement clinician judgment (<xref ref-type="bibr" rid="B37">37</xref>). Foundational segmentation frameworks such as U-Net (<xref ref-type="bibr" rid="B29">29</xref>) support model development, while reporting standards like TRIPOD-AI and STARD-AI (<xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B26">26</xref>, <xref ref-type="bibr" rid="B72">72</xref>) ensure transparency and reproducibility.</p>
<p>For panoramic and intraoral photographic modalities, AI can serve as a screening or triaging tool, particularly in primary care and teledentistry contexts where access to CBCT is limited (<xref ref-type="bibr" rid="B14">14</xref>, <xref ref-type="bibr" rid="B33">33</xref>). Clinician-in-the-loop models further ensure that AI outputs are interpreted within a professional diagnostic framework, reducing the risk of over-reliance on automated systems.</p>
</sec>
<sec id="s3b"><label>3.2</label><title>Policy and integration considerations</title>
<p>For AI to transition from research to clinical use, several policy and integration factors must be addressed:
<list list-type="simple">
<list-item>
<p>Regulatory Alignment: AI diagnostic systems should align with regulatory frameworks (e.g., FDA, EMA, local dental boards) that emphasize transparency, explainability, and post-market surveillance.</p></list-item>
<list-item>
<p>Infrastructure and Data Governance: Successful clinical integration requires secure data pipelines, federated learning frameworks, and standardized imaging protocols to enable robust external validation.</p></list-item>
<list-item>
<p>Education and Workforce Integration: Dental education must include AI literacy to ensure clinicians can critically interpret AI outputs and integrate them into diagnostic reasoning.</p></list-item>
</list>These considerations are consistent with trends in medical imaging AI, where regulatory and infrastructural readiness significantly influences clinical adoption (<xref ref-type="bibr" rid="B26">26</xref>, <xref ref-type="bibr" rid="B28">28</xref>, <xref ref-type="bibr" rid="B68">68</xref>, <xref ref-type="bibr" rid="B72">72</xref>).</p>
</sec>
</sec>
<sec id="s4"><label>4</label><title>Limitations</title>
<p>Despite rapid advancements, the evidence base remains limited by several factors:
<list list-type="simple">
<list-item>
<p>Heterogeneity of Study Design: Differences in dataset size, anatomical sites, labeling strategies, and evaluation metrics complicate direct comparisons across studies.</p></list-item>
<list-item>
<p>Limited Prospective Trials: Most included studies are retrospective; few prospective or real-time clinical validation studies exist.</p></list-item>
<list-item>
<p>Variable Reporting Standards: Although STARD-AI and TRIPOD-AI adoption is improving, many studies still lack clear descriptions of preprocessing, validation, or error analysis.</p></list-item>
<list-item>
<p>Underrepresentation of Certain Populations: Multicenter datasets remain geographically concentrated, raising questions about generalizability to diverse patient groups.</p></list-item>
</list>Addressing these limitations will require coordinated methodological efforts, standardization, and regulatory oversight.</p>
<p>Beyond methodological constraints, several additional sources of bias must be considered in AI-assisted periodontal diagnosis. Dataset bias&#x2014;including imbalances in disease prevalence, demographic distribution, and imaging conditions&#x2014;can lead models to learn spurious associations rather than true pathological features. Reference-standard variability, particularly when ground-truth labels are derived from heterogeneous clinical judgments or non-standardized criteria, further undermines model reliability. Even clinician-provided annotations may lack consistency without calibration exercises, resulting in systematic differences in &#x201C;ground-truth&#x201D; labels that propagate into model errors. In conjunction with these factors, the risk of automation bias remains a major concern: clinicians may over-rely on algorithmic outputs, accept incorrect AI predictions, or overlook contradictory clinical evidence. This phenomenon has been documented in medical AI settings, where over-trust in automated systems can result in diagnostic complacency, reduced critical appraisal, and delayed error detection&#x2014;especially when models encounter out-of-distribution cases (<xref ref-type="bibr" rid="B69">69</xref>&#x2013;<xref ref-type="bibr" rid="B71">71</xref>). Implementing clinician-in-the-loop workflows, standardized labeling protocols, calibration sessions for annotators, and explainable AI interfaces is therefore essential to mitigate these risks and preserve clinical accountability in AI-supported periodontal decision-making.</p>
<sec id="s4a"><label>4.1</label><title>Future directions</title>
<p>Future research should prioritize:
<list list-type="simple">
<list-item>
<p>Multicenter Prospective Clinical Trials using standardized protocols and external validation to evaluate AI under real-world conditions.</p></list-item>
<list-item>
<p>Task-Specific Architectures, including transformer-based multi-task networks, to improve performance on complex volumetric tasks.</p></list-item>
<list-item>
<p>Explainable and Uncertainty-Aware AI, fostering clinician trust and regulatory compliance.</p></list-item>
<list-item>
<p>Integration of AI into Longitudinal Care, enabling dynamic tracking of periodontal changes over time and personalized treatment planning.</p></list-item>
</list>These directions align with broader trends in AI for medical imaging and offer a roadmap for meaningful clinical integration in periodontics.</p>
</sec>
</sec>
<sec id="s5" sec-type="conclusions"><label>5</label><title>Conclusions</title>
<p>Artificial intelligence has progressed rapidly in periodontal diagnostics over the past five years, moving from proof-of-concept studies to externally validated, multicenter applications across multiple imaging modalities. CNN-based systems for periapical radiographs consistently achieve high specificity and accuracy, supporting their use for standardized measurements and clinical decision support. CBCT-based models offer the highest diagnostic performance for volumetric tasks, including alveolar bone measurement and furcation involvement detection, while panoramic and intraoral photographic applications provide valuable tools for screening and triage, particularly in primary care and teledentistry settings.</p>
<p>Despite these advances, methodological heterogeneity, limited prospective evidence, and variable reporting standards remain barriers to widespread clinical adoption. The integration of reporting frameworks such as STARD-AI and TRIPOD-AI, combined with explainable AI techniques and clinician-in-the-loop workflows, offers a realistic pathway toward safe and generalizable deployment.</p>
<p>Future efforts should focus on multicenter prospective trials, regulatory alignment, and standardized imaging protocols to ensure robust, equitable, and clinically meaningful integration of AI systems into periodontal practice. AI should be viewed not as a replacement for clinician expertise but as a complementary tool that enhances diagnostic accuracy, consistency, and patient care.</p>
</sec>
</body>
<back>
<sec id="s7" sec-type="author-contributions"><title>Author contributions</title>
<p>AA: Conceptualization, Formal analysis, Investigation, Resources, Writing &#x2013; original draft, Writing &#x2013; review &#x0026; editing, Methodology, Validation.</p>
</sec>
<sec id="s9" sec-type="COI-statement"><title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s10" sec-type="ai-statement"><title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec id="s12" sec-type="disclaimer"><title>Publisher&#x0027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s11" sec-type="supplementary-material"><title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fdmed.2026.1729825/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fdmed.2026.1729825/full&#x0023;supplementary-material</ext-link></p>
<supplementary-material xlink:href="Datasheet1.pdf" id="SM1" mimetype="application/pdf"/>
</sec>
<ref-list><title>References</title>
<ref id="B1"><label>1.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kassebaum</surname> <given-names>NJ</given-names></name> <name><surname>Bernab&#x00E9;</surname> <given-names>E</given-names></name> <name><surname>Dahiya</surname> <given-names>M</given-names></name> <name><surname>Bhandari</surname> <given-names>B</given-names></name> <name><surname>Murray</surname> <given-names>CJ</given-names></name> <name><surname>Marcenes</surname> <given-names>W</given-names></name></person-group>. <article-title>Global burden of severe periodontitis in 1990&#x2013;2010: a systematic review and meta-regression</article-title>. <source>J Dent Res</source>. (<year>2014</year>) <volume>93</volume>(<issue>11</issue>):<fpage>1045</fpage>&#x2013;<lpage>53</lpage>. <pub-id pub-id-type="doi">10.1177/0022034514552491</pub-id><pub-id pub-id-type="pmid">25261053</pub-id></mixed-citation></ref>
<ref id="B2"><label>2.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Petersen</surname> <given-names>PE</given-names></name> <name><surname>Ogawa</surname> <given-names>H</given-names></name></person-group>. <article-title>The global burden of periodontal disease: towards integration with chronic disease prevention and control</article-title>. <source>Periodontol 2000</source>. (<year>2012</year>) <volume>60</volume>(<issue>1</issue>):<fpage>15</fpage>&#x2013;<lpage>39</lpage>. <pub-id pub-id-type="doi">10.1111/j.1600-0757.2011.00425.x</pub-id><pub-id pub-id-type="pmid">22909104</pub-id></mixed-citation></ref>
<ref id="B3"><label>3.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chakrapani</surname> <given-names>S</given-names></name> <name><surname>Sirisha</surname> <given-names>K</given-names></name> <name><surname>Srilalitha</surname> <given-names>A</given-names></name> <name><surname>Srinivas</surname> <given-names>M</given-names></name></person-group>. <article-title>Choice of diagnostic and therapeutic imaging in periodontics and implantology</article-title>. <source>J Indian Soc Periodontol</source>. (<year>2013</year>) <volume>17</volume>(<issue>6</issue>):<fpage>711</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.4103/0972-124x.124474</pub-id><pub-id pub-id-type="pmid">24554878</pub-id></mixed-citation></ref>
<ref id="B4"><label>4.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mol</surname> <given-names>A</given-names></name></person-group>. <article-title>Imaging methods in periodontology</article-title>. <source>Periodontol 2000</source>. (<year>2004</year>) <volume>34</volume>:<fpage>34</fpage>&#x2013;<lpage>48</lpage>. <pub-id pub-id-type="doi">10.1046/j.0906-6713.2003.003423.x</pub-id><pub-id pub-id-type="pmid">14717854</pub-id></mixed-citation></ref>
<ref id="B5"><label>5.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Papapanou</surname> <given-names>PN</given-names></name> <name><surname>Sanz</surname> <given-names>M</given-names></name> <name><surname>Buduneli</surname> <given-names>N</given-names></name> <name><surname>Dietrich</surname> <given-names>T</given-names></name> <name><surname>Feres</surname> <given-names>M</given-names></name> <name><surname>Fine</surname> <given-names>DH</given-names></name><etal/></person-group> <article-title>Periodontitis: consensus report of workgroup 2 of the 2017 world workshop on the classification of periodontal and peri-implant diseases and conditions</article-title>. <source>J Clin Periodontol</source>. (<year>2018</year>) <volume>45</volume>(<issue>Suppl 20</issue>):<fpage>S162</fpage>&#x2013;<lpage>s70</lpage>. <pub-id pub-id-type="doi">10.1111/jcpe.12946</pub-id><pub-id pub-id-type="pmid">29926490</pub-id></mixed-citation></ref>
<ref id="B6"><label>6.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>JH</given-names></name> <name><surname>Kim</surname> <given-names>DH</given-names></name> <name><surname>Jeong</surname> <given-names>SN</given-names></name> <name><surname>Choi</surname> <given-names>SH</given-names></name></person-group>. <article-title>Detection and diagnosis of dental caries using a deep learning-based convolutional neural network algorithm</article-title>. <source>J Dent</source>. (<year>2018</year>) <volume>77</volume>:<fpage>106</fpage>&#x2013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1016/j.jdent.2018.07.015</pub-id><pub-id pub-id-type="pmid">30056118</pub-id></mixed-citation></ref>
<ref id="B7"><label>7.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lundervold</surname> <given-names>AS</given-names></name> <name><surname>Lundervold</surname> <given-names>A</given-names></name></person-group>. <article-title>An overview of deep learning in medical imaging focusing on MRI</article-title>. <source>Z Med Phys</source>. (<year>2019</year>) <volume>29</volume>(<issue>2</issue>):<fpage>102</fpage>&#x2013;<lpage>27</lpage>. <pub-id pub-id-type="doi">10.1016/j.zemedi.2018.11.002</pub-id><pub-id pub-id-type="pmid">30553609</pub-id></mixed-citation></ref>
<ref id="B8"><label>8.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schwendicke</surname> <given-names>F</given-names></name> <name><surname>Samek</surname> <given-names>W</given-names></name> <name><surname>Krois</surname> <given-names>J</given-names></name></person-group>. <article-title>Artificial intelligence in dentistry: chances and challenges</article-title>. <source>J Dent Res</source>. (<year>2020</year>) <volume>99</volume>(<issue>7</issue>):<fpage>769</fpage>&#x2013;<lpage>74</lpage>. <pub-id pub-id-type="doi">10.1177/0022034520915714</pub-id><pub-id pub-id-type="pmid">32315260</pub-id></mixed-citation></ref>
<ref id="B9"><label>9.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Topol</surname> <given-names>EJ</given-names></name></person-group>. <article-title>High-performance medicine: the convergence of human and artificial intelligence</article-title>. <source>Nat Med</source>. (<year>2019</year>) <volume>25</volume>(<issue>1</issue>):<fpage>44</fpage>&#x2013;<lpage>56</lpage>. <pub-id pub-id-type="doi">10.1038/s41591-018-0300-7</pub-id><pub-id pub-id-type="pmid">30617339</pub-id></mixed-citation></ref>
<ref id="B10"><label>10.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>AlGhaihab</surname> <given-names>A</given-names></name> <name><surname>Moretti</surname> <given-names>AJ</given-names></name> <name><surname>Reside</surname> <given-names>J</given-names></name> <name><surname>Tuzova</surname> <given-names>L</given-names></name> <name><surname>Huang</surname> <given-names>YS</given-names></name> <name><surname>Tyndall</surname> <given-names>DA</given-names></name></person-group>. <article-title>Automatic detection of radiographic alveolar bone loss in bitewing and periapical intraoral radiographs using deep learning technology: a preliminary evaluation</article-title>. <source>Diagnostics (Basel)</source>. (<year>2025</year>) <volume>15</volume>(<issue>5</issue>):<fpage>576</fpage>. <pub-id pub-id-type="doi">10.3390/diagnostics15050576</pub-id><pub-id pub-id-type="pmid">40075823</pub-id></mixed-citation></ref>
<ref id="B11"><label>11.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>&#x00C7;elik</surname> <given-names>B</given-names></name> <name><surname>Sava&#x015F;taer</surname> <given-names>EF</given-names></name> <name><surname>Kaya</surname> <given-names>HI</given-names></name> <name><surname>&#x00C7;elik</surname> <given-names>ME</given-names></name></person-group>. <article-title>The role of deep learning for periapical lesion detection on panoramic radiographs</article-title>. <source>Dentomaxillofac Radiol</source>. (<year>2023</year>) <volume>52</volume>(<issue>8</issue>):<fpage>20230118</fpage>. <pub-id pub-id-type="doi">10.1259/dmfr.20230118</pub-id></mixed-citation></ref>
<ref id="B12"><label>12.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hoss</surname> <given-names>P</given-names></name> <name><surname>Meyer</surname> <given-names>O</given-names></name> <name><surname>W&#x00F6;lfle</surname> <given-names>UC</given-names></name> <name><surname>W&#x00FC;lk</surname> <given-names>A</given-names></name> <name><surname>Meusburger</surname> <given-names>T</given-names></name> <name><surname>Meier</surname> <given-names>L</given-names></name><etal/></person-group> <article-title>Detection of periodontal bone loss on periapical radiographs-A diagnostic study using different convolutional neural networks</article-title>. <source>J Clin Med</source>. (<year>2023</year>) <volume>12</volume>(<issue>22</issue>):<fpage>7189</fpage>. <pub-id pub-id-type="doi">10.3390/jcm12227189</pub-id><pub-id pub-id-type="pmid">38002799</pub-id></mixed-citation></ref>
<ref id="B13"><label>13.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kurt-Bayrakdar</surname> <given-names>S</given-names></name> <name><surname>Bayrakdar</surname> <given-names>&#x0130;</given-names></name> <name><surname>Kuran</surname> <given-names>A</given-names></name> <name><surname>&#x00C7;elik</surname> <given-names>&#x00D6;</given-names></name> <name><surname>Orhan</surname> <given-names>K</given-names></name> <name><surname>Jagtap</surname> <given-names>R</given-names></name></person-group>. <article-title>Advancing periodontal diagnosis: harnessing advanced artificial intelligence for patterns of periodontal bone loss in cone-beam computed tomography</article-title>. <source>Dentomaxillofac Radiol</source>. (<year>2025</year>) <volume>54</volume>(<issue>4</issue>):<fpage>268</fpage>&#x2013;<lpage>78</lpage>. <pub-id pub-id-type="doi">10.1093/dmfr/twaf011</pub-id><pub-id pub-id-type="pmid">39908459</pub-id></mixed-citation></ref>
<ref id="B14"><label>14.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mao</surname> <given-names>K</given-names></name> <name><surname>Thu</surname> <given-names>KM</given-names></name> <name><surname>Hung</surname> <given-names>KF</given-names></name> <name><surname>Yu</surname> <given-names>OY</given-names></name> <name><surname>Hsung</surname> <given-names>RT</given-names></name> <name><surname>Lam</surname> <given-names>WY</given-names></name></person-group>. <article-title>Artificial intelligence in detecting periodontal disease from intraoral photographs: a systematic review</article-title>. <source>Int Dent J</source>. (<year>2025</year>) <volume>75</volume>(<issue>5</issue>):<fpage>100883</fpage>. <pub-id pub-id-type="doi">10.1016/j.identj.2025.100883</pub-id><pub-id pub-id-type="pmid">40639137</pub-id></mixed-citation></ref>
<ref id="B15"><label>15.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tariq</surname> <given-names>A</given-names></name> <name><surname>Nakhi</surname> <given-names>FB</given-names></name> <name><surname>Salah</surname> <given-names>F</given-names></name> <name><surname>Eltayeb</surname> <given-names>G</given-names></name> <name><surname>Abdulla</surname> <given-names>GJ</given-names></name> <name><surname>Najim</surname> <given-names>N</given-names></name><etal/></person-group> <article-title>Efficiency and accuracy of artificial intelligence in the radiographic detection of periodontal bone loss: a systematic review</article-title>. <source>Imaging Sci Dent</source>. (<year>2023</year>) <volume>53</volume>(<issue>3</issue>):<fpage>193</fpage>&#x2013;<lpage>98</lpage>. <pub-id pub-id-type="doi">10.5624/isd.20230092</pub-id><pub-id pub-id-type="pmid">37799746</pub-id></mixed-citation></ref>
<ref id="B16"><label>16.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Xue</surname> <given-names>T</given-names></name> <name><surname>Chen</surname> <given-names>L</given-names></name> <name><surname>Sun</surname> <given-names>Q</given-names></name></person-group>. <article-title>Deep learning method to automatically diagnose periodontal bone loss and periodontitis stage in dental panoramic radiograph</article-title>. <source>J Dent</source>. (<year>2024</year>) <volume>150</volume>:<fpage>105373</fpage>. <pub-id pub-id-type="doi">10.1016/j.jdent.2024.105373</pub-id><pub-id pub-id-type="pmid">39332519</pub-id></mixed-citation></ref>
<ref id="B17"><label>17.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>J</given-names></name> <name><surname>Liu</surname> <given-names>X</given-names></name> <name><surname>Shao</surname> <given-names>Y</given-names></name> <name><surname>Gao</surname> <given-names>Y</given-names></name> <name><surname>Pan</surname> <given-names>K</given-names></name> <name><surname>Jin</surname> <given-names>C</given-names></name><etal/></person-group> <article-title>Periapical lesion detection in periapical radiographs using the latest convolutional neural network ConvNeXt and its integrated models</article-title>. <source>Sci Rep</source>. (<year>2024</year>) <volume>14</volume>(<issue>1</issue>):<fpage>25429</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-024-75748-9</pub-id><pub-id pub-id-type="pmid">39455655</pub-id></mixed-citation></ref>
<ref id="B18"><label>18.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Iacob</surname> <given-names>AM</given-names></name> <name><surname>Castrill&#x00F3;n Fern&#x00E1;ndez</surname> <given-names>M</given-names></name> <name><surname>Fern&#x00E1;ndez Robledo</surname> <given-names>L</given-names></name> <name><surname>Barbeito Castro</surname> <given-names>E</given-names></name> <name><surname>Escobedo Mart&#x00ED;nez</surname> <given-names>MF</given-names></name></person-group>. <article-title>Automated detection of periodontal bone loss in two-dimensional (2D) radiographs using artificial intelligence: a systematic review</article-title>. <source>Dent J (Basel)</source>. (<year>2025</year>) <volume>13</volume>(<issue>9</issue>):<fpage>413</fpage>. <pub-id pub-id-type="doi">10.3390/dj13090413</pub-id><pub-id pub-id-type="pmid">41002686</pub-id></mixed-citation></ref>
<ref id="B19"><label>19.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jundaeng</surname> <given-names>J</given-names></name> <name><surname>Chamchong</surname> <given-names>R</given-names></name> <name><surname>Nithikathkul</surname> <given-names>C</given-names></name></person-group>. <article-title>Periodontitis diagnosis: a review of current and future trends in artificial intelligence</article-title>. <source>Technol Health Care</source>. (<year>2025</year>) <volume>33</volume>(<issue>1</issue>):<fpage>473</fpage>&#x2013;<lpage>84</lpage>. <pub-id pub-id-type="doi">10.3233/thc-241169</pub-id><pub-id pub-id-type="pmid">39302402</pub-id></mixed-citation></ref>
<ref id="B20"><label>20.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chatzopoulos</surname> <given-names>GS</given-names></name> <name><surname>Koidou</surname> <given-names>VP</given-names></name> <name><surname>Tsalikis</surname> <given-names>L</given-names></name> <name><surname>Kaklamanos</surname> <given-names>EG</given-names></name></person-group>. <article-title>Artificial intelligence for detection and classification of furcation defects using radiographic imaging: a systematic review</article-title>. <source>Imaging Sci Dent</source>. (<year>2025</year>) <volume>55</volume>:<fpage>322</fpage>&#x2013;<lpage>44</lpage>. <pub-id pub-id-type="doi">10.5624/isd.20250101</pub-id><pub-id pub-id-type="pmid">41536891</pub-id></mixed-citation></ref>
<ref id="B21"><label>21.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chau</surname> <given-names>RCW</given-names></name> <name><surname>Li</surname> <given-names>GH</given-names></name> <name><surname>Tew</surname> <given-names>IM</given-names></name> <name><surname>Thu</surname> <given-names>KM</given-names></name> <name><surname>McGrath</surname> <given-names>C</given-names></name> <name><surname>Lo</surname> <given-names>WL</given-names></name><etal/></person-group> <article-title>Accuracy of artificial intelligence-based photographic detection of gingivitis</article-title>. <source>Int Dent J</source>. (<year>2023</year>) <volume>73</volume>(<issue>5</issue>):<fpage>724</fpage>&#x2013;<lpage>30</lpage>. <pub-id pub-id-type="doi">10.1016/j.identj.2023.03.007</pub-id><pub-id pub-id-type="pmid">37117096</pub-id></mixed-citation></ref>
<ref id="B22"><label>22.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chau</surname> <given-names>RCW</given-names></name> <name><surname>Cheng</surname> <given-names>ACC</given-names></name> <name><surname>Mao</surname> <given-names>K</given-names></name> <name><surname>Thu</surname> <given-names>KM</given-names></name> <name><surname>Ling</surname> <given-names>Z</given-names></name> <name><surname>Tew</surname> <given-names>IM</given-names></name><etal/></person-group> <article-title>External validation of an AI mHealth tool for gingivitis detection among older adults at daycare centers: a pilot study</article-title>. <source>Int Dent J</source>. (<year>2025</year>) <volume>75</volume>(<issue>3</issue>):<fpage>1970</fpage>&#x2013;<lpage>78</lpage>. <pub-id pub-id-type="doi">10.1016/j.identj.2025.01.008</pub-id><pub-id pub-id-type="pmid">39864975</pub-id></mixed-citation></ref>
<ref id="B23"><label>23.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sarakbi</surname> <given-names>RM</given-names></name> <name><surname>Varma</surname> <given-names>SR</given-names></name> <name><surname>Muthiah Annamma</surname> <given-names>L</given-names></name> <name><surname>Sivaswamy</surname> <given-names>V</given-names></name></person-group>. <article-title>Implications of artificial intelligence in periodontal treatment maintenance: a scoping review</article-title>. <source>Front Oral Health</source>. (<year>2025</year>) <volume>6</volume>:<fpage>1561128</fpage>. <pub-id pub-id-type="doi">10.3389/froh.2025.1561128</pub-id><pub-id pub-id-type="pmid">40438083</pub-id></mixed-citation></ref>
<ref id="B24"><label>24.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Collins</surname> <given-names>GS</given-names></name> <name><surname>Reitsma</surname> <given-names>JB</given-names></name> <name><surname>Altman</surname> <given-names>DG</given-names></name> <name><surname>Moons</surname> <given-names>KG</given-names></name></person-group>. <article-title>Transparent reporting of a multivariable prediction model for individual prognosis or diagnosis (TRIPOD): the TRIPOD statement</article-title>. <source>Br Med J</source>. (<year>2015</year>) <volume>350</volume>:<fpage>g7594</fpage>. <pub-id pub-id-type="doi">10.1136/bmj.g7594</pub-id></mixed-citation></ref>
<ref id="B25"><label>25.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sounderajah</surname> <given-names>V</given-names></name> <name><surname>Ashrafian</surname> <given-names>H</given-names></name> <name><surname>Aggarwal</surname> <given-names>R</given-names></name> <name><surname>De Fauw</surname> <given-names>J</given-names></name> <name><surname>Denniston</surname> <given-names>AK</given-names></name> <name><surname>Greaves</surname> <given-names>F</given-names></name><etal/></person-group> <article-title>Developing specific reporting guidelines for diagnostic accuracy studies assessing AI interventions: the STARD-AI steering group</article-title>. <source>Nat Med</source>. (<year>2020</year>) <volume>26</volume>(<issue>6</issue>):<fpage>807</fpage>&#x2013;<lpage>08</lpage>. <pub-id pub-id-type="doi">10.1038/s41591-020-0941-1</pub-id><pub-id pub-id-type="pmid">32514173</pub-id></mixed-citation></ref>
<ref id="B26"><label>26.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kelly</surname> <given-names>CJ</given-names></name> <name><surname>Karthikesalingam</surname> <given-names>A</given-names></name> <name><surname>Suleyman</surname> <given-names>M</given-names></name> <name><surname>Corrado</surname> <given-names>G</given-names></name> <name><surname>King</surname> <given-names>D</given-names></name></person-group>. <article-title>Key challenges for delivering clinical impact with artificial intelligence</article-title>. <source>BMC Med</source>. (<year>2019</year>) <volume>17</volume>(<issue>1</issue>):<fpage>195</fpage>. <pub-id pub-id-type="doi">10.1186/s12916-019-1426-2</pub-id><pub-id pub-id-type="pmid">31665002</pub-id></mixed-citation></ref>
<ref id="B27"><label>27.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sounderajah</surname> <given-names>V</given-names></name> <name><surname>Ashrafian</surname> <given-names>H</given-names></name> <name><surname>Golub</surname> <given-names>RM</given-names></name> <name><surname>Shetty</surname> <given-names>S</given-names></name> <name><surname>De Fauw</surname> <given-names>J</given-names></name> <name><surname>Hooft</surname> <given-names>L</given-names></name><etal/></person-group> <article-title>Developing a reporting guideline for artificial intelligence-centred diagnostic test accuracy studies: the STARD-AI protocol</article-title>. <source>BMJ Open</source>. (<year>2021</year>) <volume>11</volume>(<issue>6</issue>):<fpage>e047709</fpage>. <pub-id pub-id-type="doi">10.1136/bmjopen-2020-047709</pub-id><pub-id pub-id-type="pmid">34183345</pub-id></mixed-citation></ref>
<ref id="B28"><label>28.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sounderajah</surname> <given-names>V</given-names></name> <name><surname>Guni</surname> <given-names>A</given-names></name> <name><surname>Liu</surname> <given-names>X</given-names></name> <name><surname>Collins</surname> <given-names>GS</given-names></name> <name><surname>Karthikesalingam</surname> <given-names>A</given-names></name> <name><surname>Markar</surname> <given-names>SR</given-names></name><etal/></person-group> <article-title>The STARD-AI reporting guideline for diagnostic accuracy studies using artificial intelligence</article-title>. <source>Nat Med</source>. (<year>2025</year>) <volume>31</volume>:<fpage>3283</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1038/s41591-025-03953-8</pub-id><pub-id pub-id-type="pmid">40954311</pub-id></mixed-citation></ref>
<ref id="B29"><label>29.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Ronneberger</surname> <given-names>O</given-names></name> <name><surname>Fischer</surname> <given-names>P</given-names></name> <name><surname>Brox</surname> <given-names>T</given-names></name></person-group>. <article-title>U-Net: convolutional networks for biomedical image segmentation</article-title> (<year>2015</year>) <fpage>234</fpage>&#x2013;<lpage>41</lpage> p.</mixed-citation></ref>
<ref id="B30"><label>30.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Baena-de la Iglesia</surname> <given-names>T</given-names></name> <name><surname>Navarro-Fraile</surname> <given-names>E</given-names></name> <name><surname>Iglesias-Linares</surname> <given-names>A</given-names></name></person-group>. <article-title>Validation of an AI-aided 3D method for enhanced volumetric quantification of external root resorption in orthodontics</article-title>. <source>Angle Orthod</source>. (<year>2025</year>) <volume>95</volume>(<issue>5</issue>):<fpage>474</fpage>&#x2013;<lpage>82</lpage>. <pub-id pub-id-type="doi">10.2319/092324-781.1</pub-id><pub-id pub-id-type="pmid">40936626</pub-id></mixed-citation></ref>
<ref id="B31"><label>31.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>da Andrade-Bortoletto</surname> <given-names>MFS</given-names></name> <name><surname>Jindanil</surname> <given-names>T</given-names></name> <name><surname>Fontenele</surname> <given-names>RC</given-names></name> <name><surname>Jacobs</surname> <given-names>R</given-names></name> <name><surname>Freitas</surname> <given-names>DQ</given-names></name></person-group>. <article-title>Comparison of AI-powered tools for CBCT-based mandibular incisive canal segmentation: a validation study</article-title>. <source>Clin Oral Implants Res</source>. (<year>2025</year>) <volume>36</volume>(<issue>9</issue>):<fpage>1086</fpage>&#x2013;<lpage>94</lpage>. <pub-id pub-id-type="doi">10.1111/clr.14455</pub-id><pub-id pub-id-type="pmid">40481776</pub-id></mixed-citation></ref>
<ref id="B32"><label>32.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kot</surname> <given-names>WY</given-names></name> <name><surname>Au Yeung</surname> <given-names>SY</given-names></name> <name><surname>Leung</surname> <given-names>YY</given-names></name> <name><surname>Leung</surname> <given-names>PH</given-names></name> <name><surname>Yang</surname> <given-names>WF</given-names></name></person-group>. <article-title>Evolution of deep learning tooth segmentation from CT/CBCT images: a systematic review and meta-analysis</article-title>. <source>BMC Oral Health</source>. (<year>2025</year>) <volume>25</volume>(<issue>1</issue>):<fpage>800</fpage>. <pub-id pub-id-type="doi">10.1186/s12903-025-05984-6</pub-id><pub-id pub-id-type="pmid">40420051</pub-id></mixed-citation></ref>
<ref id="B33"><label>33.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mure&#x0219;anu</surname> <given-names>S</given-names></name> <name><surname>Alm&#x0103;&#x0219;an</surname> <given-names>O</given-names></name> <name><surname>Hede&#x0219;iu</surname> <given-names>M</given-names></name> <name><surname>Dio&#x0219;an</surname> <given-names>L</given-names></name> <name><surname>Dinu</surname> <given-names>C</given-names></name> <name><surname>Jacobs</surname> <given-names>R</given-names></name></person-group>. <article-title>Artificial intelligence models for clinical usage in dentistry with a focus on dentomaxillofacial CBCT: a systematic review</article-title>. <source>Oral Radiol</source>. (<year>2023</year>) <volume>39</volume>(<issue>1</issue>):<fpage>18</fpage>&#x2013;<lpage>40</lpage>. <pub-id pub-id-type="doi">10.1007/s11282-022-00660-9</pub-id></mixed-citation></ref>
<ref id="B34"><label>34.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pishghadam</surname> <given-names>N</given-names></name> <name><surname>Esmaeilyfard</surname> <given-names>R</given-names></name> <name><surname>Paknahad</surname> <given-names>M</given-names></name></person-group>. <article-title>Explainable deep learning for age and gender estimation in dental CBCT scans using attention mechanisms and multi task learning</article-title>. <source>Sci Rep</source>. (<year>2025</year>) <volume>15</volume>(<issue>1</issue>):<fpage>18070</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-025-03305-z</pub-id><pub-id pub-id-type="pmid">40413203</pub-id></mixed-citation></ref>
<ref id="B35"><label>35.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jundaeng</surname> <given-names>J</given-names></name> <name><surname>Chamchong</surname> <given-names>R</given-names></name> <name><surname>Nithikathkul</surname> <given-names>C</given-names></name></person-group>. <article-title>Artificial intelligence-powered innovations in periodontal diagnosis: a new era in dental healthcare</article-title>. <source>Front Med Technol</source>. (<year>2024</year>) <volume>6</volume>:<fpage>1469852</fpage>. <pub-id pub-id-type="doi">10.3389/fmedt.2024.1469852</pub-id><pub-id pub-id-type="pmid">39866670</pub-id></mixed-citation></ref>
<ref id="B36"><label>36.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tuygunov</surname> <given-names>N</given-names></name> <name><surname>Samaranayake</surname> <given-names>L</given-names></name> <name><surname>Khurshid</surname> <given-names>Z</given-names></name> <name><surname>Rewthamrongsris</surname> <given-names>P</given-names></name> <name><surname>Schwendicke</surname> <given-names>F</given-names></name> <name><surname>Osathanon</surname> <given-names>T</given-names></name><etal/></person-group> <article-title>The transformative role of artificial intelligence in dentistry: a comprehensive overview part 2: the promise and perils, and the international dental federation communique</article-title>. <source>Int Dent J</source>. (<year>2025</year>) <volume>75</volume>(<issue>2</issue>):<fpage>397</fpage>&#x2013;<lpage>404</lpage>. <pub-id pub-id-type="doi">10.1016/j.identj.2025.02.006</pub-id><pub-id pub-id-type="pmid">40011130</pub-id></mixed-citation></ref>
<ref id="B37"><label>37.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mei</surname> <given-names>L</given-names></name> <name><surname>Deng</surname> <given-names>K</given-names></name> <name><surname>Cui</surname> <given-names>Z</given-names></name> <name><surname>Fang</surname> <given-names>Y</given-names></name> <name><surname>Li</surname> <given-names>Y</given-names></name> <name><surname>Lai</surname> <given-names>H</given-names></name><etal/></person-group> <article-title>Clinical knowledge-guided hybrid classification network for automatic periodontal disease diagnosis in x-ray image</article-title>. <source>Med Image Anal</source>. (<year>2025</year>) <volume>99</volume>:<fpage>103376</fpage>. <pub-id pub-id-type="doi">10.1016/j.media.2024.103376</pub-id><pub-id pub-id-type="pmid">39536402</pub-id></mixed-citation></ref>
<ref id="B38"><label>38.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Yadalam</surname> <given-names>PK</given-names></name> <name><surname>Pawar</surname> <given-names>AR</given-names></name> <name><surname>Natarajan</surname> <given-names>PM</given-names></name> <name><surname>Ardila</surname> <given-names>CM</given-names></name></person-group>. <article-title>A novel dual embedding few-shot learning approach for classifying bone loss using orthopantomogram radiographic notes</article-title>. <source>Head Face Med</source>. (<year>2025</year>) <volume>21</volume>(<issue>1</issue>):<fpage>49</fpage>. <pub-id pub-id-type="doi">10.1186/s13005-025-00528-3</pub-id><pub-id pub-id-type="pmid">40640850</pub-id></mixed-citation></ref>
<ref id="B39"><label>39.</label><mixed-citation publication-type="other"><person-group person-group-type="author"><name><surname>Kabir</surname> <given-names>T</given-names></name> <name><surname>Lee</surname> <given-names>C-T</given-names></name> <name><surname>Nelson</surname> <given-names>J</given-names></name> <name><surname>Sheng</surname> <given-names>S</given-names></name> <name><surname>Meng</surname> <given-names>H-W</given-names></name> <name><surname>Chen</surname> <given-names>L</given-names></name><etal/></person-group> <comment>An End-to-end entangled segmentation and classification convolutional neural network for periodontitis stage grading from periapical radiographic images. <italic>arXiv</italic></comment>. (<year>2021</year>). <pub-id pub-id-type="doi">10.48550/arXiv.2109.13120</pub-id></mixed-citation></ref>
<ref id="B40"><label>40.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Widyaningrum</surname> <given-names>R</given-names></name> <name><surname>Astuti</surname> <given-names>ER</given-names></name> <name><surname>Soetojo</surname> <given-names>A</given-names></name> <name><surname>Faadiya</surname> <given-names>AN</given-names></name> <name><surname>Nurrachman</surname> <given-names>AS</given-names></name> <name><surname>Kinanggit</surname> <given-names>ND</given-names></name><etal/></person-group> <article-title>Hybrid two-stage CNN for detection and staging of periodontitis on panoramic radiographs</article-title>. <source>J Oral Biol Craniofac Res</source>. (<year>2025</year>) <volume>15</volume>(<issue>6</issue>):<fpage>1392</fpage>&#x2013;<lpage>99</lpage>. <pub-id pub-id-type="doi">10.1016/j.jobcr.2025.08.019</pub-id><pub-id pub-id-type="pmid">40927498</pub-id></mixed-citation></ref>
<ref id="B41"><label>41.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>IH</given-names></name> <name><surname>Lin</surname> <given-names>CH</given-names></name> <name><surname>Lee</surname> <given-names>MK</given-names></name> <name><surname>Chen</surname> <given-names>TE</given-names></name> <name><surname>Lan</surname> <given-names>TH</given-names></name> <name><surname>Chang</surname> <given-names>CM</given-names></name><etal/></person-group> <article-title>Convolutional-neural-network-based radiographs evaluation assisting in early diagnosis of the periodontal bone loss via periapical radiograph</article-title>. <source>J Dent Sci</source>. (<year>2024</year>) <volume>19</volume>(<issue>1</issue>):<fpage>550</fpage>&#x2013;<lpage>59</lpage>. <pub-id pub-id-type="doi">10.1016/j.jds.2023.09.032</pub-id><pub-id pub-id-type="pmid">38303886</pub-id></mixed-citation></ref>
<ref id="B42"><label>42.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Krois</surname> <given-names>J</given-names></name> <name><surname>Garcia Cantu</surname> <given-names>A</given-names></name> <name><surname>Chaurasia</surname> <given-names>A</given-names></name> <name><surname>Patil</surname> <given-names>R</given-names></name> <name><surname>Chaudhari</surname> <given-names>PK</given-names></name> <name><surname>Gaudin</surname> <given-names>R</given-names></name><etal/></person-group> <article-title>Generalizability of deep learning models for dental image analysis</article-title>. <source>Sci Rep</source>. (<year>2021</year>) <volume>11</volume>(<issue>1</issue>):<fpage>6102</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-021-85454-5</pub-id><pub-id pub-id-type="pmid">33731732</pub-id></mixed-citation></ref>
<ref id="B43"><label>43.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>SW</given-names></name> <name><surname>Huz</surname> <given-names>K</given-names></name> <name><surname>Gorelick</surname> <given-names>K</given-names></name> <name><surname>Li</surname> <given-names>J</given-names></name> <name><surname>Bina</surname> <given-names>T</given-names></name> <name><surname>Matsumura</surname> <given-names>S</given-names></name><etal/></person-group> <article-title>Evaluation by dental professionals of an artificial intelligence-based application to measure alveolar bone loss</article-title>. <source>BMC Oral Health</source>. (<year>2025</year>) <volume>25</volume>(<issue>1</issue>):<fpage>329</fpage>. <pub-id pub-id-type="doi">10.1186/s12903-025-05677-0</pub-id><pub-id pub-id-type="pmid">40025477</pub-id></mixed-citation></ref>
<ref id="B44"><label>44.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Haylaz</surname> <given-names>E</given-names></name> <name><surname>Gumussoy</surname> <given-names>I</given-names></name> <name><surname>Duman</surname> <given-names>SB</given-names></name> <name><surname>Kalabalik</surname> <given-names>F</given-names></name> <name><surname>Eren</surname> <given-names>MC</given-names></name> <name><surname>Demirsoy</surname> <given-names>MS</given-names></name><etal/></person-group> <article-title>Automatic segmentation of the nasolacrimal canal: application of the nnU-Net v2 model in CBCT imaging</article-title>. <source>J Clin Med</source>. (<year>2025</year>) <volume>14</volume>(<issue>3</issue>):<fpage>778</fpage>. <pub-id pub-id-type="doi">10.3390/jcm14030778</pub-id><pub-id pub-id-type="pmid">39941449</pub-id></mixed-citation></ref>
<ref id="B45"><label>45.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Shetty</surname> <given-names>S</given-names></name> <name><surname>Talaat</surname> <given-names>W</given-names></name> <name><surname>AlKawas</surname> <given-names>S</given-names></name> <name><surname>Al-Rawi</surname> <given-names>N</given-names></name> <name><surname>Reddy</surname> <given-names>S</given-names></name> <name><surname>Hamdoon</surname> <given-names>Z</given-names></name><etal/></person-group> <article-title>Application of artificial intelligence-based detection of furcation involvement in mandibular first molar using cone beam tomography images- a preliminary study</article-title>. <source>BMC Oral Health</source>. (<year>2024</year>) <volume>24</volume>(<issue>1</issue>):<fpage>1476</fpage>. <pub-id pub-id-type="doi">10.1186/s12903-024-05268-5</pub-id><pub-id pub-id-type="pmid">39633335</pub-id></mixed-citation></ref>
<ref id="B46"><label>46.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>J</given-names></name> <name><surname>Hao</surname> <given-names>J</given-names></name> <name><surname>Lin</surname> <given-names>H</given-names></name> <name><surname>Pan</surname> <given-names>W</given-names></name> <name><surname>Yang</surname> <given-names>J</given-names></name> <name><surname>Feng</surname> <given-names>Y</given-names></name><etal/></person-group> <article-title>Deep learning-enabled 3D multimodal fusion of cone-beam CT and intraoral mesh scans for clinically applicable tooth-bone reconstruction</article-title>. <source>Patterns (N Y)</source>. (<year>2023</year>) <volume>4</volume>(<issue>9</issue>):<fpage>100825</fpage>. <pub-id pub-id-type="doi">10.1016/j.patter.2023.100825</pub-id><pub-id pub-id-type="pmid">37720330</pub-id></mixed-citation></ref>
<ref id="B47"><label>47.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhou</surname> <given-names>Y</given-names></name> <name><surname>Xu</surname> <given-names>Y</given-names></name> <name><surname>Khalil</surname> <given-names>B</given-names></name> <name><surname>Nalley</surname> <given-names>A</given-names></name> <name><surname>Tarce</surname> <given-names>M</given-names></name></person-group>. <article-title>An open deep learning-based framework and model for tooth instance segmentation in dental CBCT</article-title>. <source>Clin Oral Investig</source>. (<year>2025</year>) <volume>29</volume>(<issue>10</issue>):<fpage>473</fpage>. <pub-id pub-id-type="doi">10.1007/s00784-025-06578-w</pub-id><pub-id pub-id-type="pmid">40996470</pub-id></mixed-citation></ref>
<ref id="B48"><label>48.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chen</surname> <given-names>Z</given-names></name> <name><surname>Chen</surname> <given-names>S</given-names></name> <name><surname>Hu</surname> <given-names>F</given-names></name></person-group>. <article-title>CTA-UNet: CNN-transformer architecture UNet for dental CBCT images segmentation</article-title>. <source>Phys Med Biol</source>. (<year>2023</year>) <volume>68</volume>(<issue>17</issue>):<fpage>175042</fpage>. <pub-id pub-id-type="doi">10.1088/1361-6560/acf026</pub-id></mixed-citation></ref>
<ref id="B49"><label>49.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>Y</given-names></name> <name><surname>Zhu</surname> <given-names>L</given-names></name> <name><surname>Wang</surname> <given-names>W</given-names></name> <name><surname>Lv</surname> <given-names>L</given-names></name> <name><surname>Li</surname> <given-names>Q</given-names></name> <name><surname>Liu</surname> <given-names>Y</given-names></name><etal/></person-group> <article-title>Progressive multi-task learning for fine-grained dental implant classification and segmentation in CBCT image</article-title>. <source>Comput Biol Med</source>. (<year>2025</year>) <volume>189</volume>:<fpage>109896</fpage>. <pub-id pub-id-type="doi">10.1016/j.compbiomed.2025.109896</pub-id><pub-id pub-id-type="pmid">40073494</pub-id></mixed-citation></ref>
<ref id="B50"><label>50.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Palkovics</surname> <given-names>D</given-names></name> <name><surname>Molnar</surname> <given-names>B</given-names></name> <name><surname>Pinter</surname> <given-names>C</given-names></name> <name><surname>Garc&#x00ED;a-Mato</surname> <given-names>D</given-names></name> <name><surname>Diaz-Pinto</surname> <given-names>A</given-names></name> <name><surname>Windisch</surname> <given-names>P</given-names></name><etal/></person-group> <article-title>Automatic deep learning segmentation of mandibular periodontal bone topography on cone-beam computed tomography images</article-title>. <source>J Dent</source>. (<year>2025</year>) <volume>159</volume>:<fpage>105813</fpage>. <pub-id pub-id-type="doi">10.1016/j.jdent.2025.105813</pub-id><pub-id pub-id-type="pmid">40373868</pub-id></mixed-citation></ref>
<ref id="B51"><label>51.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Pan</surname> <given-names>X</given-names></name> <name><surname>Wang</surname> <given-names>C</given-names></name> <name><surname>Luo</surname> <given-names>X</given-names></name> <name><surname>Dong</surname> <given-names>Q</given-names></name> <name><surname>Sun</surname> <given-names>H</given-names></name> <name><surname>Zhang</surname> <given-names>W</given-names></name><etal/></person-group> <article-title>Development and verification of a convolutional neural network-based model for automatic mandibular canal localization on multicenter CBCT images</article-title>. <source>BMC Oral Health</source>. (<year>2025</year>) <volume>25</volume>(<issue>1</issue>):<fpage>1352</fpage>. <pub-id pub-id-type="doi">10.1186/s12903-025-06724-6</pub-id><pub-id pub-id-type="pmid">40841637</pub-id></mixed-citation></ref>
<ref id="B52"><label>52.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Felsch</surname> <given-names>M</given-names></name> <name><surname>Meyer</surname> <given-names>O</given-names></name> <name><surname>Schlickenrieder</surname> <given-names>A</given-names></name> <name><surname>Engels</surname> <given-names>P</given-names></name> <name><surname>Sch&#x00F6;newolf</surname> <given-names>J</given-names></name> <name><surname>Z&#x00F6;llner</surname> <given-names>F</given-names></name><etal/></person-group> <article-title>Detection and localization of caries and hypomineralization on dental photographs with a vision transformer model</article-title>. <source>NPJ Digit Med</source>. (<year>2023</year>) <volume>6</volume>(<issue>1</issue>):<fpage>198</fpage>. <pub-id pub-id-type="doi">10.1038/s41746-023-00944-2</pub-id><pub-id pub-id-type="pmid">37880375</pub-id></mixed-citation></ref>
<ref id="B53"><label>53.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Tao</surname> <given-names>LR</given-names></name> <name><surname>Li</surname> <given-names>Y</given-names></name> <name><surname>Wu</surname> <given-names>XY</given-names></name> <name><surname>Gu</surname> <given-names>Y</given-names></name> <name><surname>Xie</surname> <given-names>Y</given-names></name> <name><surname>Yu</surname> <given-names>XY</given-names></name><etal/></person-group> <article-title>Deep learning photo processing for periodontitis screening</article-title>. <source>J Dent Res</source>. (<year>2025</year>):<fpage>220345251347508</fpage>. <pub-id pub-id-type="doi">10.1177/00220345251347508</pub-id><pub-id pub-id-type="pmid">40650464</pub-id></mixed-citation></ref>
<ref id="B54"><label>54.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Wen</surname> <given-names>C</given-names></name> <name><surname>Bai</surname> <given-names>X</given-names></name> <name><surname>Yang</surname> <given-names>J</given-names></name> <name><surname>Li</surname> <given-names>S</given-names></name> <name><surname>Wang</surname> <given-names>X</given-names></name> <name><surname>Yang</surname> <given-names>D</given-names></name></person-group>. <article-title>Deep learning based approach: automated gingival inflammation grading model using gingival removal strategy</article-title>. <source>Sci Rep</source>. (<year>2024</year>) <volume>14</volume>(<issue>1</issue>):<fpage>19780</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-024-70311-y</pub-id><pub-id pub-id-type="pmid">39187553</pub-id></mixed-citation></ref>
<ref id="B55"><label>55.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rezallah</surname> <given-names>NN</given-names></name> <name><surname>Sherif</surname> <given-names>G</given-names></name> <name><surname>Abdelkarim</surname> <given-names>AZ</given-names></name> <name><surname>Afifi</surname> <given-names>S</given-names></name></person-group>. <article-title>Enhancing periodontal bone loss diagnosis through advanced AI techniques</article-title>. <source>Appl Sci</source>. (<year>2025</year>) <volume>15</volume>(<issue>12</issue>):<fpage>6832</fpage>. <pub-id pub-id-type="doi">10.3390/app15126832</pub-id></mixed-citation></ref>
<ref id="B56"><label>56.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Naufal</surname> <given-names>M</given-names></name> <name><surname>Fatichah</surname> <given-names>C</given-names></name> <name><surname>Astuti</surname> <given-names>E</given-names></name> <name><surname>Putra</surname> <given-names>R</given-names></name></person-group>. <article-title>YOLOv8-Based segmentation and 3D reconstruction of alveolar bone and mandibular canal in CBCT images</article-title>. (<year>2024</year>) <fpage>425</fpage>&#x2013;<lpage>30</lpage> p.</mixed-citation></ref>
<ref id="B57"><label>57.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Rashid</surname> <given-names>MO</given-names></name> <name><surname>Gaghor</surname> <given-names>S</given-names></name></person-group>. <article-title>Clinical application of a deep learning system for automatic mandibular alveolar bone quantity assessment and suggested treatment options using CBCT cross-sections</article-title>. <source>Medicine (Baltimore)</source>. (<year>2025</year>) <volume>104</volume>(<issue>30</issue>):<fpage>e43257</fpage>. <pub-id pub-id-type="doi">10.1097/md.0000000000043257</pub-id><pub-id pub-id-type="pmid">40725950</pub-id></mixed-citation></ref>
<ref id="B58"><label>58.</label><mixed-citation publication-type="confproc"><person-group person-group-type="author"><name><surname>Widiasri</surname> <given-names>M</given-names></name> <name><surname>Suciati</surname> <given-names>N</given-names></name> <name><surname>Fatichah</surname> <given-names>C</given-names></name> <name><surname>Astuti</surname> <given-names>E</given-names></name> <name><surname>Putra</surname> <given-names>R</given-names></name></person-group>. <article-title>Alveolar bone and mandibular canal segmentation on cone beam computed tomography images using U-net</article-title>. (<year>2023</year>) <fpage>36</fpage>&#x2013;<lpage>41</lpage> p.</mixed-citation></ref>
<ref id="B59"><label>59.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>X</given-names></name> <name><surname>Guo</surname> <given-names>E</given-names></name> <name><surname>Liu</surname> <given-names>X</given-names></name> <name><surname>Zhao</surname> <given-names>H</given-names></name> <name><surname>Yang</surname> <given-names>J</given-names></name> <name><surname>Li</surname> <given-names>W</given-names></name><etal/></person-group> <article-title>Enhancing furcation involvement classification on panoramic radiographs with vision transformers</article-title>. <source>BMC Oral Health</source>. (<year>2025</year>) <volume>25</volume>(<issue>1</issue>):<fpage>153</fpage>. <pub-id pub-id-type="doi">10.1186/s12903-025-05431-6</pub-id><pub-id pub-id-type="pmid">39881302</pub-id></mixed-citation></ref>
<ref id="B60"><label>60.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hadzic</surname> <given-names>A</given-names></name> <name><surname>Urschler</surname> <given-names>M</given-names></name> <name><surname>Press</surname> <given-names>JA</given-names></name> <name><surname>Riedl</surname> <given-names>R</given-names></name> <name><surname>Rugani</surname> <given-names>P</given-names></name> <name><surname>&#x0160;tern</surname> <given-names>D</given-names></name><etal/></person-group> <article-title>Evaluating a periapical lesion detection CNN on a clinically representative CBCT dataset-A validation study</article-title>. <source>J Clin Med</source>. (<year>2023</year>) <volume>13</volume>(<issue>1</issue>):<fpage>197</fpage>. <pub-id pub-id-type="doi">10.3390/jcm13010197</pub-id><pub-id pub-id-type="pmid">38202204</pub-id></mixed-citation></ref>
<ref id="B61"><label>61.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>J</given-names></name> <name><surname>Jin</surname> <given-names>C</given-names></name> <name><surname>Wang</surname> <given-names>X</given-names></name> <name><surname>Pan</surname> <given-names>K</given-names></name> <name><surname>Li</surname> <given-names>Z</given-names></name> <name><surname>Yi</surname> <given-names>X</given-names></name><etal/></person-group> <article-title>A comparative analysis of deep learning models for assisting in the diagnosis of periapical lesions in periapical radiographs</article-title>. <source>BMC Oral Health</source>. (<year>2025</year>) <volume>25</volume>(<issue>1</issue>):<fpage>801</fpage>. <pub-id pub-id-type="doi">10.1186/s12903-025-06104-0</pub-id><pub-id pub-id-type="pmid">40420083</pub-id></mixed-citation></ref>
<ref id="B62"><label>62.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chang</surname> <given-names>HJ</given-names></name> <name><surname>Lee</surname> <given-names>SJ</given-names></name> <name><surname>Yong</surname> <given-names>TH</given-names></name> <name><surname>Shin</surname> <given-names>NY</given-names></name> <name><surname>Jang</surname> <given-names>BG</given-names></name> <name><surname>Kim</surname> <given-names>JE</given-names></name><etal/></person-group> <article-title>Deep learning hybrid method to automatically diagnose periodontal bone loss and stage periodontitis</article-title>. <source>Sci Rep</source>. (<year>2020</year>) <volume>10</volume>(<issue>1</issue>):<fpage>7531</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-020-64509-z</pub-id><pub-id pub-id-type="pmid">32372049</pub-id></mixed-citation></ref>
<ref id="B63"><label>63.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Muhammed Sunnetci</surname> <given-names>K</given-names></name> <name><surname>Ulukaya</surname> <given-names>S</given-names></name> <name><surname>Alkan</surname> <given-names>A</given-names></name></person-group>. <article-title>Periodontal bone loss detection based on hybrid deep learning and machine learning models with a user-friendly application</article-title>. <source>Biomed Signal Process Control</source>. (<year>2022</year>) <volume>77</volume>:<fpage>103844</fpage>. <pub-id pub-id-type="doi">10.1016/j.bspc.2022.103844</pub-id></mixed-citation></ref>
<ref id="B64"><label>64.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Dujic</surname> <given-names>H</given-names></name> <name><surname>Meyer</surname> <given-names>O</given-names></name> <name><surname>Hoss</surname> <given-names>P</given-names></name> <name><surname>W&#x00F6;lfle</surname> <given-names>UC</given-names></name> <name><surname>W&#x00FC;lk</surname> <given-names>A</given-names></name> <name><surname>Meusburger</surname> <given-names>T</given-names></name><etal/></person-group> <article-title>Automatized detection of periodontal bone loss on periapical radiographs by vision transformer networks</article-title>. <source>Diagnostics (Basel)</source>. (<year>2023</year>) <volume>13</volume>(<issue>23</issue>):<fpage>3562</fpage>. <pub-id pub-id-type="doi">10.3390/diagnostics13233562</pub-id><pub-id pub-id-type="pmid">38066803</pub-id></mixed-citation></ref>
<ref id="B65"><label>65.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Co&#x015F;gun Baybars</surname> <given-names>S</given-names></name> <name><surname>Daldal</surname> <given-names>M</given-names></name> <name><surname>Parlak Baydo&#x011F;an</surname> <given-names>M</given-names></name> <name><surname>Arslan Tuncer</surname> <given-names>S</given-names></name></person-group>. <article-title>Evaluation of apical closure in panoramic radiographs using vision transformer architectures ViT-based apical closure classification</article-title>. <source>Diagnostics (Basel)</source>. (<year>2025</year>) <volume>15</volume>(<issue>18</issue>):<fpage>2350</fpage>. <pub-id pub-id-type="doi">10.3390/diagnostics15182350</pub-id></mixed-citation></ref>
<ref id="B66"><label>66.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gao</surname> <given-names>S</given-names></name> <name><surname>Li</surname> <given-names>X</given-names></name> <name><surname>Li</surname> <given-names>X</given-names></name> <name><surname>Li</surname> <given-names>Z</given-names></name> <name><surname>Deng</surname> <given-names>Y</given-names></name></person-group>. <article-title>Transformer based tooth classification from cone-beam computed tomography for dental charting</article-title>. <source>Comput Biol Med</source>. (<year>2022</year>) <volume>148</volume>:<fpage>105880</fpage>. <pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.105880</pub-id><pub-id pub-id-type="pmid">35914362</pub-id></mixed-citation></ref>
<ref id="B67"><label>67.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>K&#x00FC;&#x00E7;&#x00FC;k</surname> <given-names>DB</given-names></name> <name><surname>Imak</surname> <given-names>A</given-names></name> <name><surname>&#x00D6;z&#x00E7;elik</surname> <given-names>STA</given-names></name> <name><surname>&#x00C7;elebi</surname> <given-names>A</given-names></name> <name><surname>T&#x00FC;rko&#x011F;lu</surname> <given-names>M</given-names></name> <name><surname>Sengur</surname> <given-names>A</given-names></name><etal/></person-group> <article-title>Hybrid CNN-transformer model for accurate impacted tooth detection in panoramic radiographs</article-title>. <source>Diagnostics (Basel)</source>. (<year>2025</year>) <volume>15</volume>(<issue>3</issue>):<fpage>244</fpage>. <pub-id pub-id-type="doi">10.3390/diagnostics15030244</pub-id></mixed-citation></ref>
<ref id="B68"><label>68.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Di Martino</surname> <given-names>F</given-names></name> <name><surname>Delmastro</surname> <given-names>F</given-names></name></person-group>. <article-title>Explainable AI for clinical and remote health applications: a survey on tabular and time series data</article-title>. <source>Artif Intell Rev</source>. (<year>2023</year>) <volume>56</volume>(<issue>6</issue>):<fpage>5261</fpage>&#x2013;<lpage>315</lpage>. <pub-id pub-id-type="doi">10.1007/s10462-022-10304-3</pub-id><pub-id pub-id-type="pmid">36320613</pub-id></mixed-citation></ref>
<ref id="B69"><label>69.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Salvi</surname> <given-names>M</given-names></name> <name><surname>Seoni</surname> <given-names>S</given-names></name> <name><surname>Campagner</surname> <given-names>A</given-names></name> <name><surname>Gertych</surname> <given-names>A</given-names></name> <name><surname>Acharya</surname> <given-names>UR</given-names></name> <name><surname>Molinari</surname> <given-names>F</given-names></name><etal/></person-group> <article-title>Explainability and uncertainty: two sides of the same coin for enhancing the interpretability of deep learning models in healthcare</article-title>. <source>Int J Med Inform</source>. (<year>2025</year>) <volume>197</volume>:<fpage>105846</fpage>. <pub-id pub-id-type="doi">10.1016/j.ijmedinf.2025.105846</pub-id><pub-id pub-id-type="pmid">39993336</pub-id></mixed-citation></ref>
<ref id="B70"><label>70.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Singh</surname> <given-names>Y</given-names></name> <name><surname>Hathaway</surname> <given-names>QA</given-names></name> <name><surname>Keishing</surname> <given-names>V</given-names></name> <name><surname>Salehi</surname> <given-names>S</given-names></name> <name><surname>Wei</surname> <given-names>Y</given-names></name> <name><surname>Horvat</surname> <given-names>N</given-names></name><etal/></person-group> <article-title>Beyond <italic>post hoc</italic> explanations: a comprehensive framework for accountable AI in medical imaging through transparency, interpretability, and explainability</article-title>. <source>Bioengineering (Basel)</source>. (<year>2025</year>) <volume>12</volume>(<issue>8</issue>):<fpage>879</fpage>. <pub-id pub-id-type="doi">10.3390/bioengineering12080879</pub-id><pub-id pub-id-type="pmid">40868392</pub-id></mixed-citation></ref>
<ref id="B71"><label>71.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Uribe</surname> <given-names>SE</given-names></name> <name><surname>Hamdan</surname> <given-names>MH</given-names></name> <name><surname>Valente</surname> <given-names>NA</given-names></name> <name><surname>Yamaguchi</surname> <given-names>S</given-names></name> <name><surname>Umer</surname> <given-names>F</given-names></name> <name><surname>Tichy</surname> <given-names>A</given-names></name><etal/></person-group> <article-title>Evaluating dental AI research papers: key considerations for editors and reviewers</article-title>. <source>J Dent.</source> (<year>2025</year>) <volume>160</volume>:<fpage>105867</fpage>. <pub-id pub-id-type="doi">10.1016/j.jdent.2025.105867</pub-id><pub-id pub-id-type="pmid">40451605</pub-id></mixed-citation></ref>
<ref id="B72"><label>72.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sitaras</surname> <given-names>S</given-names></name> <name><surname>Tsolakis</surname> <given-names>IA</given-names></name> <name><surname>Gelsini</surname> <given-names>M</given-names></name> <name><surname>Tsolakis</surname> <given-names>AI</given-names></name> <name><surname>Schwendicke</surname> <given-names>F</given-names></name> <name><surname>Wolf</surname> <given-names>TG</given-names></name><etal/></person-group> <article-title>Applications of artificial intelligence in dental medicine: a critical review</article-title>. <source>Int Dent J</source>. (<year>2025</year>) <volume>75</volume>(<issue>2</issue>):<fpage>474</fpage>&#x2013;<lpage>86</lpage>. <pub-id pub-id-type="doi">10.1016/j.identj.2024.11.009</pub-id><pub-id pub-id-type="pmid">39843259</pub-id></mixed-citation></ref>
<ref id="B73"><label>73.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schneider</surname> <given-names>L</given-names></name> <name><surname>Krasowski</surname> <given-names>A</given-names></name> <name><surname>Pitchika</surname> <given-names>V</given-names></name> <name><surname>Bombeck</surname> <given-names>L</given-names></name> <name><surname>Schwendicke</surname> <given-names>F</given-names></name> <name><surname>B&#x00FC;ttner</surname> <given-names>M</given-names></name></person-group>. <article-title>Assessment of CNNs, transformers, and hybrid architectures in dental image segmentation</article-title>. <source>J Dent</source>. (<year>2025</year>) <volume>156</volume>:<fpage>105668</fpage>. <pub-id pub-id-type="doi">10.1016/j.jdent.2025.105668</pub-id><pub-id pub-id-type="pmid">40064460</pub-id></mixed-citation></ref></ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by"><p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/968900/overview">Camila Tirapelli</ext-link>, University of S&#x00E3;o Paulo, Ribeir&#x00E3;o Preto, Brazil</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by"><p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/990924/overview">Walter Y. H. Lam</ext-link>, The University of Hong Kong, Hong Kong SAR, China</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2403412/overview">Alessio Rosa</ext-link>, University of Rome Tor Vergata, Italy</p></fn>
</fn-group>
</back>
</article>