<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3-mathml3.dtd">
<article article-type="research-article" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" dtd-version="1.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Dent. Med.</journal-id><journal-title-group>
<journal-title>Frontiers in Dental Medicine</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Dent. Med.</abbrev-journal-title></journal-title-group>
<issn pub-type="epub">2673-4915</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fdmed.2026.1730454</article-id>
<article-version article-version-type="Version of Record" vocab="NISO-RP-8-2008"/>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Original Research</subject>
</subj-group>
</article-categories>
<title-group>
<article-title>Developing an AI-powered tool for radiographic feedback on working length determination in pre-clinical endodontic training</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author"><name><surname>Aljamani</surname><given-names>Sanaa</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/3364610/overview"/><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role></contrib>
<contrib contrib-type="author" corresp="yes"><name><surname>AlMomani</surname><given-names>Iman</given-names></name>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="aff" rid="aff4"><sup>4</sup></xref>
<xref ref-type="aff" rid="aff5"><sup>5</sup></xref>
<xref ref-type="corresp" rid="cor1">&#x002A;</xref><uri xlink:href="https://loop.frontiersin.org/people/3244448/overview" /><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; review &#x0026; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing/">Writing &#x2013; review &#x0026; editing</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration/">Project administration</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="resources" vocab-term-identifier="https://credit.niso.org/contributor-roles/resources/">Resources</role></contrib>
<contrib contrib-type="author"><name><surname>El-Shafai</surname><given-names>Walid</given-names></name>
<xref ref-type="aff" rid="aff6"><sup>6</sup></xref>
<xref ref-type="aff" rid="aff7"><sup>7</sup></xref><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role></contrib>
<contrib contrib-type="author"><name><surname>AL-Akhras</surname><given-names>Mousa</given-names></name>
<xref ref-type="aff" rid="aff8"><sup>8</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2656610/overview" />
<role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role></contrib>
<contrib contrib-type="author"><name><surname>AlHaddad</surname><given-names>AbdulAziz</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/3276703/overview" /><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role></contrib>
<contrib contrib-type="author"><name><surname>Abu zaghlan</surname><given-names>Rawan</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref><uri xlink:href="https://loop.frontiersin.org/people/2932669/overview" /><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role><role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing &#x2013; original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing &#x2013; original draft</role></contrib>
</contrib-group>
<aff id="aff1"><label>1</label><institution>Restorative Department, School of Dentistry, The University of Jordan</institution>, <city>Amman</city>, <country country="jo">Jordan</country></aff>
<aff id="aff2"><label>2</label><institution>Consultant in Endodontics, Restorative Department, Jordan University Hospital</institution>, <city>Amman</city>, <country country="jo">Jordan</country></aff>
<aff id="aff3"><label>3</label><institution>Computer Science Department, King Abdullah II School of Information Technology, The University of Jordan</institution>, <city>Amman</city>, <country country="jo">Jordan</country></aff>
<aff id="aff4"><label>4</label><institution>Intelligent Cybersecurity Engineering Research Group, The University of Jordan</institution>, <city>Amman</city>, <country country="jo">Jordan</country></aff>
<aff id="aff5"><label>5</label><institution>Computer Science Department, CCIS, Prince Sultan University</institution>, <city>Riyadh</city>, <country>Saudi Arabia</country></aff>
<aff id="aff6"><label>6</label><institution>Department of Electronics and Electrical Communications Engineering, Faculty of Electronic Engineering, Menoufia University</institution>, <city>Menouf</city>, <country country="eg">Egypt</country></aff>
<aff id="aff7"><label>7</label><institution>Automated Systems and Computing Lab (ASCL), Prince Sultan University</institution>, <city>Riyadh</city>, <country country="sa">Saudi Arabia</country></aff>
<aff id="aff8"><label>8</label><institution>Computer Information Systems Department, King Abdullah II School of Information Technology, The University of Jordan</institution>, <city>Amman</city>, <country country="jo">Jordan</country></aff>
<author-notes>
<corresp id="cor1"><label>&#x002A;</label><bold>Correspondence:</bold> Iman AlMomani <email xlink:href="mailto:i.momani@ju.edu.jo">i.momani@ju.edu.jo</email></corresp>
</author-notes>
<pub-date publication-format="electronic" date-type="pub" iso-8601-date="2026-02-27"><day>27</day><month>02</month><year>2026</year></pub-date>
<pub-date publication-format="electronic" date-type="collection"><year>2026</year></pub-date>
<volume>7</volume><elocation-id>1730454</elocation-id>
<history>
<date date-type="received"><day>22</day><month>10</month><year>2025</year></date>
<date date-type="rev-recd"><day>26</day><month>01</month><year>2026</year></date>
<date date-type="accepted"><day>02</day><month>02</month><year>2026</year></date>
</history>
<permissions>
<copyright-statement>&#x00A9; 2026 Aljamani, AlMomani, El-Shafai, AL-Akhras, AlHaddad and Abu zaghlan.</copyright-statement>
<copyright-year>2026</copyright-year><copyright-holder>Aljamani, AlMomani, El-Shafai, AL-Akhras, AlHaddad and Abu zaghlan</copyright-holder><license><ali:license_ref start_date="2026-02-27">https://creativecommons.org/licenses/by/4.0/</ali:license_ref><license-p>This is an open-access article distributed under the terms of the <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution License (CC BY)</ext-link>. The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</license-p></license>
</permissions>
<abstract><sec><title>Background</title>
<p>Establishing an accurate working length is a critical step in root canal treatment and directly influences clinical success. As artificial intelligence increasingly integrates into medical education, applying it to enhance endodontic training has become increasingly important.</p>
</sec><sec><title>Aims</title>
<p>This study aimed to develop a machine learning&#x2013;based tool that provides prompt, personalized, constructive feedback on radiographic working length determination in a pre-clinical setting and to evaluate its usability among dental students.</p>
</sec><sec><title>Methods</title>
<p>A newly labeled dataset of 3,000 radiographic images was created and categorized into optimal, over-extended, and under-extended working lengths. This dataset was balanced and split into 80&#x0025;, 10&#x0025;, and 10&#x0025; for training, validation, and testing, respectively. Twenty-two convolutional neural network models were developed, trained, and evaluated using five diagnostic metrics (accuracy, F1-score, precision, recall, and testing time). The best-performing model was integrated into a web-based platform and piloted with 30 pre-clinical dental students who provided usability feedback via a Likert-scale questionnaire. The study hypothesized that students would rate the tool as usable and educationally supportive.</p>
</sec><sec><title>Results</title>
<p>The custom-developed deep CNN achieved 97&#x0025;&#x2013;99&#x0025; accuracy, 95&#x0025;&#x2013;98&#x0025; F1-score, 94&#x0025;&#x2013;99&#x0025; precision, and a recall rate of 96&#x0025;&#x2013;98&#x0025;, with an average testing time of 0.54&#x2005;s. Students rated the proposed system positively across clarity, ease of use, and learning support, with median usability scores of 5.0 across all items and interquartile ranges of 4&#x2013;5 to 5&#x2013;5.</p>
</sec><sec><title>Conclusion</title>
<p>The AI-powered feedback system demonstrated high accuracy with strong user acceptance. By delivering instant, constructive feedback on working length determination, it supports effective learning and skill refinement in endodontic education. It is also beneficial in classrooms with large student populations. Future work will expand the dataset and integrate additional stages of root canal training into a unified AI-based educational platform.</p>
</sec>
</abstract>
<kwd-group>
<kwd>artificial intelligence</kwd>
<kwd>constructive feedback</kwd>
<kwd>convolutional neural network (CNN)</kwd>
<kwd>education technology</kwd>
<kwd>endodontic education</kwd>
<kwd>ground-truthing</kwd>
<kwd>machine learning</kwd>
<kwd>radiographic</kwd>
</kwd-group><funding-group><funding-statement>The author(s) declared that financial support was not received for this work and/or its publication.</funding-statement></funding-group><counts>
<fig-count count="7"/>
<table-count count="7"/><equation-count count="0"/><ref-count count="56"/><page-count count="16"/><word-count count="0"/></counts><custom-meta-group><custom-meta><meta-name>section-at-acceptance</meta-name><meta-value>Endodontics</meta-value></custom-meta></custom-meta-group>
</article-meta>
</front>
<body><sec id="s1" sec-type="intro"><label>1</label><title>Introduction</title>
<p>Root canal treatment (RCT) is a fundamental procedure in endodontics aimed at eliminating dental pulp infections through the cleaning and shaping of the root canal system, utilizing specialized instruments and materials (<xref ref-type="bibr" rid="B1">1</xref>, <xref ref-type="bibr" rid="B2">2</xref>). <xref ref-type="fig" rid="F1">Figure&#x00A0;1</xref> illustrates different stages of RCT, which involve chemical and mechanical cleaning to the correct working length of the root canal and filling the space with a dedicated root canal-filling material.</p>
<fig id="F1" position="float"><label>Figure&#x00A0;1</label>
<caption><p>The technical stages of root canal treatment.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdmed-07-1730454-g001.tif"><alt-text content-type="machine-generated">Four-panel dental procedure sequence showing: (a) clinical photograph of a Premolar with an access cavity, (b) radiograph with instrument determining root canal length, (c) radiograph with a master cone fitted inside the canal, (d) radiograph displaying root canal obturation completion.</alt-text>
</graphic>
</fig>
<p>The success of RCT is influenced by various factors, including tooth-related aspects, accurate diagnosis, and careful interpretation of radiographic examinations (<xref ref-type="bibr" rid="B3">3</xref>, <xref ref-type="bibr" rid="B4">4</xref>). The technical stages of RCT follow a sequential approach, beginning with access cavity preparation, followed by working length (WL) determination, chemo-mechanical preparation, and obturation. Each step is codependent, with the accuracy of one step directly affecting the next (<xref ref-type="bibr" rid="B1">1</xref>, <xref ref-type="bibr" rid="B4">4</xref>, <xref ref-type="bibr" rid="B5">5</xref>).</p>
<p>Working length (WL) in RCT defines the extent to which cleaning and shaping should be performed within the root canal (<xref ref-type="bibr" rid="B4">4</xref>). Literature suggests that WL should terminate within 2&#x2005;mm of the radiographic apex to optimize treatment outcomes (<xref ref-type="bibr" rid="B6">6</xref>&#x2013;<xref ref-type="bibr" rid="B8">8</xref>). Incorrect working length determination can lead to undesirable technical and biological consequences of the whole endodontic procedure (<xref ref-type="bibr" rid="B6">6</xref>, <xref ref-type="bibr" rid="B9">9</xref>, <xref ref-type="bibr" rid="B10">10</xref>). Historically, WL has been determined using various techniques, including patient response and tactile sensation. However, these techniques are often inaccurate due to factors such as root canal morphology, apical inflammation, and patient variability (<xref ref-type="bibr" rid="B11">11</xref>, <xref ref-type="bibr" rid="B12">12</xref>). Electronic apex locators&#x2019; technique has demonstrated improved accuracy in WL determination and reduced radiation exposure, but is also associated with limitations when used alone (<xref ref-type="bibr" rid="B8">8</xref>, <xref ref-type="bibr" rid="B13">13</xref>, <xref ref-type="bibr" rid="B14">14</xref>). A combined approach incorporating radiographic WL measurements is recommended to ensure reliable results of WL determination (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B15">15</xref>, <xref ref-type="bibr" rid="B16">16</xref>).</p>
<p>Artificial intelligence (AI) is increasingly being adopted in the dental field to enhance efficiency and accuracy (<xref ref-type="bibr" rid="B17">17</xref>&#x2013;<xref ref-type="bibr" rid="B20">20</xref>). AI-driven machine learning models based on radiographic and image analysis have shown promising results in diagnosing dental caries (<xref ref-type="bibr" rid="B21">21</xref>), detecting periapical pathology and diagnosis (<xref ref-type="bibr" rid="B19">19</xref>, <xref ref-type="bibr" rid="B22">22</xref>&#x2013;<xref ref-type="bibr" rid="B24">24</xref>), assessing the quality of root canal filling (<xref ref-type="bibr" rid="B25">25</xref>, <xref ref-type="bibr" rid="B26">26</xref>), and improving orthodontic treatment planning (<xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B28">28</xref>).</p>
<p>The research on AI applications in WL determination remains limited (<xref ref-type="bibr" rid="B28">28</xref>). In one study, AI-driven models for WL determination have demonstrated considerable accuracy, with some models achieving 96&#x0025; accuracy compared to specialist endodontists 76&#x0025; (<xref ref-type="bibr" rid="B29">29</xref>). Another model achieved 85&#x0025; accuracy compared to the dual-frequency impedance ratio method (<xref ref-type="bibr" rid="B30">30</xref>). <xref ref-type="table" rid="T1">Table&#x00A0;1</xref> summarizes relevant studies that explored AI work in radiographic examination in endodontics.</p>
<table-wrap id="T1" position="float"><label>Table&#x00A0;1</label>
<caption><p>Summary of AI methods used in dental radiography studies.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="left"/>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Author and year</th>
<th valign="top" align="center">Diagnostic technique</th>
<th valign="top" align="center">AI method</th>
<th valign="top" align="center">Outcome measure</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Saghiri et al., 2012</td>
<td valign="top" align="left">Periapical radiographs/cadaver study</td>
<td valign="top" align="left">ANN alone</td>
<td valign="top" align="left">93&#x0025; accuracy in working length determination</td>
</tr>
<tr>
<td valign="top" align="left">Saghiri et al., 2012</td>
<td valign="top" align="left">Periapical radiographs/adaver study</td>
<td valign="top" align="left">ANN compared to Endodontists</td>
<td valign="top" align="left">96&#x0025; accuracy in anatomical position of minor apical foramen compared to 76&#x0025; of endodontists</td>
</tr>
<tr>
<td valign="top" align="left">Qiao et al., 2020</td>
<td valign="top" align="left">Circuit system designed for working length measurements</td>
<td valign="top" align="left">Neural network-based multi-frequency impedance method</td>
<td valign="top" align="left">95&#x0025; accuracy in working length determination</td>
</tr>
<tr>
<td valign="top" align="left">HA et al., 2023</td>
<td valign="top" align="left">Radiographs/ Augmented images</td>
<td valign="top" align="left">Deep learning Model (YOLOv5s, YOLOv5x, and YOLOv7)</td>
<td valign="top" align="left">Denoising and data balancing of radiographic images have set the accuracy of the three models to be above 95&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Michael G et al., 2020</td>
<td valign="top" align="left">Panoramic radiographs</td>
<td valign="top" align="left">Predictive deep learning algorithm</td>
<td valign="top" align="left">The rank correlation between model and cohort confidence scores for positive and negative condition cases was 0.72 and 0.34, respectively.</td>
</tr>
<tr>
<td valign="top" align="left">X Gao et al., 2021</td>
<td valign="top" align="left">Radiographic and clinical data</td>
<td valign="top" align="left">BP artificial neural network model</td>
<td valign="top" align="left">The accuracy of this BP neural network model was 95.60&#x0025; for the prediction of post-operative pain.</td>
</tr>
<tr>
<td valign="top" align="left">NP M et al., 2022</td>
<td valign="top" align="left">Periapical radiograph (3000)</td>
<td valign="top" align="left">CNN to score periapical lesion on periapical radiographs using the PAI score system</td>
<td valign="top" align="left">True prediction PAI 1: 90.0&#x0025;, PAI 2,5: 30&#x0025;, PAI 3: 60&#x0025;, PAI 4: 71&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">MJ et al., 2017</td>
<td valign="top" align="left">Periapical radiograph and CBCT (<italic>N</italic>&#x2009;&#x003D;&#x2009;240)</td>
<td valign="top" align="left">Probabilistic neural network (PNN)</td>
<td valign="top" align="left">PNN in CBCT images was 96.6&#x0025; accurate in detecting vertical root fracture</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Undergraduate dental education introduces students to the science and practice of endodontics through pre-clinical training, where they develop knowledge, skills, and confidence via theoretical and practical sessions (<xref ref-type="bibr" rid="B31">31</xref>&#x2013;<xref ref-type="bibr" rid="B34">34</xref>). These sessions emphasize the importance of key factors influencing successful RCT, such as understanding tooth anatomy, accurate diagnosis, careful interpretation of radiographs, and practical steps of root canal treatment (<xref ref-type="bibr" rid="B3">3</xref>, <xref ref-type="bibr" rid="B35">35</xref>, <xref ref-type="bibr" rid="B36">36</xref>).</p>
<p>While AI integration in dental education has been generally well-received, such as virtual reality simulators for operative dentistry (<xref ref-type="bibr" rid="B37">37</xref>&#x2013;<xref ref-type="bibr" rid="B39">39</xref>) and AI-assisted diagnostic assessments in endodontics (<xref ref-type="bibr" rid="B40">40</xref>), existing WL-related AI studies do not provide personalized, immediate, or formative feedback, which is essential for skill acquisition in a pre-clinical setting.</p>
<p>The precise gap addressed in this study is the absence of an AI-based educational tool that moves beyond simple image classification to deliver instant, individualized feedback on radiographic working length determination. Instant formative feedback enables students to recognize errors, self-correct, and refine their technique during the learning process; an advantage not offered by previous WL-focused AI models, which provide only categorical predictions without pedagogical guidance.</p>
<p>This study therefore aimed to develop machine learning models to provide targeted feedback on radiographic working length (WL) determination through the following steps:
<list list-type="simple">
<list-item>
<p>Create a new, balanced, labeled dataset of single-rooted tooth radiographs categorized into optimal, over-extended, and under-extended WL.</p></list-item>
<list-item>
<p>Train and test 22 convolutional neural network (CNN) models on this dataset.</p></list-item>
<list-item>
<p>Evaluate models&#x2019; performance using 11 quantitative metrics.</p></list-item>
<list-item>
<p>Select the highest-performing model&#x2014;primarily based on accuracy&#x2014;to develop an innovative, automated, and instant web-based WL feedback system.</p></list-item>
<list-item>
<p>Pilot the system with pre-clinical dental students to assess its usability and educational effectiveness.</p></list-item>
</list></p>
</sec>
<sec id="s2"><label>2</label><title>Material and methods</title>
<sec id="s2a"><label>2.1</label><title>Proposed AI-driven framework</title>
<p>The methodology in this study outlines a detailed and systematic approach to developing a highly accurate AI-based system for working length determination using dental radiographic images in dental education. This proposed AI-driven framework incorporated both pre-trained and custom-developed Convolutional Neural Network (CNN) models, utilizing their capabilities to overcome the complexities inherent in dental radiographic images. <xref ref-type="fig" rid="F2">Figure&#x00A0;2</xref> presents a schematic overview of the primary components encompassed within the proposed framework. Subsequent subsections detail the various phases in this framework, providing a comprehensive roadmap of the methodological approach adopted by the IT team in this study.</p>
<fig id="F2" position="float"><label>Figure&#x00A0;2</label>
<caption><p>The main steps of the preposed AI-driven framework.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdmed-07-1730454-g002.tif"><alt-text content-type="machine-generated">Flowchart illustrating a five-step process for classifying WL radiographic images using convolutional neural networks, including dataset preparation, preprocessing, model building, fine-tuning and training, and classification and evaluation, with clear divisions for data handling, model training, and iterative improvement.</alt-text>
</graphic>
</fig>
<p>An AI-assisted feedback tool was developed and integrated within a web-based platform to provide automated evaluation and feedback on students&#x2019; laboratory performance. The system was piloted with a group of 30 dental students (&#x2248;10&#x0025; of the enrolled cohort of 300 students) who voluntarily agreed to participate towards the end of their pre-clinical year. This is justified as an appropriate pilot sample size, as 10&#x2013;30 participants are considered sufficient to identify usability patterns and system acceptability.</p>
<p>After using the tool during their laboratory sessions, participants completed a structured feedback questionnaire to evaluate its effectiveness and usefulness. The questionnaire employed a five-point Likert scale (1&#x2009;&#x003D;&#x2009;strongly disagree to 5&#x2009;&#x003D;&#x2009;strongly agree) covering domains of usability, reliability, feedback clarity, and overall satisfaction.</p>
</sec>
<sec id="s2b"><label>2.2</label><title>Dataset preparation</title>
<p>The foundational step of the proposed framework is to assemble a robust dataset. This dataset initially consists of 321 high-resolution digital radiographs and has been expanded to include 3,000 digital radiographs of anterior and premolar human teeth, accessed and extracted using K files of minimum size 15, to measure the WL. These images were obtained from pre-clinical laboratory simulation exercises using extracted teeth only; no patient radiographs were used. Ethical approval for the use of extracted human teeth and radiographic imaging for research and educational purposes was obtained from the Institutional Review Board (IRB), University of Jordan (Decision No. 547/2025). All radiographs were fully de-identified prior to analysis in accordance with IRB-approved procedures and institutional research governance requirements.</p>
<p>The WL dental radiographic images were collected and obtained using the advanced Ai Dental Woodpecker-V1.0.20 imaging software with acquisition parameters of (65&#x2005;kVp, 7&#x2005;mA, and an exposure time of 0.17&#x2005;s).</p>
<p>Duplicate images were identified and removed to maintain data uniqueness and quality, ensuring dataset integrity, robustness, and utility. Concurrently, an extensive labeling operation was conducted, in which each image was systematically annotated to categorize the working length as optimal, under-extended, or over-extended by both a specialist endodontist and a radiologist with 4&#x2013;5 years of experience. Both annotators were calibrated using 50 sample images prior to labeling the entire dataset. Inter-rater agreement was quantified using Cohen&#x0027;s <italic>&#x03BA;</italic>, yielding a <italic>&#x03BA;</italic> value of 0.77 (95&#x0025; CI: 0.72&#x2013;0.82), indicating substantial agreement. Disagreements were resolved through adjudication by a third senior endodontist.</p>
<p>The categorization protocol, developed specifically for this study, relied on precise measurements of the distance between the endodontic file tip and the radiographic root apex as recommended in the literature (<xref ref-type="bibr" rid="B4">4</xref>, <xref ref-type="bibr" rid="B6">6</xref>, <xref ref-type="bibr" rid="B7">7</xref>). The collected images were almost evenly distributed across the classifications: 1,007 radiographic images with optimum WL, 1,001 with under-extended WL, and 1,004 with over-extended WL.</p>
<p>Per-class counts were maintained across training, validation, and test sets using a stratified split. The final partition included 80&#x0025; for training (<italic>n</italic>&#x2009;&#x003D;&#x2009;2,400), 10&#x0025; for validation (<italic>n</italic>&#x2009;&#x003D;&#x2009;300), and 10&#x0025; for test (<italic>n</italic>&#x2009;&#x003D;&#x2009;300). Splits were performed at the tooth level to prevent data leakage, ensuring no radiographs of the same tooth appeared across different subsets. Near-duplicates were removed prior to splitting. No class-specific augmentation was applied beyond standard transformations used uniformly across all categories.</p>
<p>Samples of WL radiographs, categorized according to the classification process, are presented in the representative images in <xref ref-type="fig" rid="F3">Figure&#x00A0;3</xref>, with each numerical category described as follows:
<list list-type="simple">
<list-item>
<p><bold>0</bold>: Optimum WL images, where the file tip is optimally positioned within a 0&#x2013;2&#x2005;mm range from the radiographic apex, ensuring that the cleaning and shaping of the root canal system is within the confines of the root structure.</p></list-item>
<list-item>
<p><bold>1</bold>: Under-extended WL images, where the file tip falls short of reaching the radiographic apex by more than 2&#x2005;mm, potentially leading to inadequate disinfection.</p></list-item>
<list-item>
<p><bold>2</bold>: Over-extended WL images, where the file tip extends beyond the radiographic apex, an undesirable outcome risking over-instrumentation of the root canal system.</p></list-item>
</list></p>
<fig id="F3" position="float"><label>Figure&#x00A0;3</label>
<caption><p>Visual samples of various examined working lengths: <bold>(a)</bold> Optimum WL, <bold>(b)</bold> over extended WL, and <bold>(c)</bold> under extended WL.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdmed-07-1730454-g003.tif"><alt-text content-type="machine-generated">Nine black-and-white dental radiographs display root canal procedures in three rows labeled optimum, over extended, and under extended working length, each showing different file positioning within tooth roots to compare endodontic treatment lengths.</alt-text>
</graphic>
</fig>
<p>All WL digital radiographic images were resized to a uniform resolution of 224&#x2009;&#x00D7;&#x2009;224 pixels. Resizing helps reduce computational load and streamline the processing pipeline (<xref ref-type="bibr" rid="B41">41</xref>, <xref ref-type="bibr" rid="B42">42</xref>).</p>
<p>Because the dataset consisted exclusively of single-rooted anterior and premolar teeth, generalizability to multi-rooted molars, anatomical variations, and different radiographic sensors may be limited. This limitation is addressed further in the discussion section. No external validation dataset from another clinic or imaging system was available for this study.</p>
</sec>
<sec id="s2c"><label>2.3</label><title>Building CNN models</title>
<p>This study employed a dual-pathway model development strategy which encompasses both the integration of state-of-the-art pre-trained CNN architectures [VGG16, ResNet50, VGG19, DenseNet121, DenseNet169, DenseNet201, EfficientNet series (EfficientNetB0- EfficientNetB7), InceptionResNetV2, InceptionV3, MobileNet, MobileNetV2, Mo- bileNetV3Large, MobileNetV3Small, and Xception] (<xref ref-type="bibr" rid="B43">43</xref>&#x2013;<xref ref-type="bibr" rid="B48">48</xref>), and the development of a custom CNN model designed to capture the unique features of endodontic radiographic imaging that generic and pre-trained models may not effectively capture. This aims to harness the robust capabilities of advanced neural networks while addressing the specific challenges associated with dental radiographic images.</p>
<p>A diverse selection of pre-trained CNN architectures was integrated, each offering distinct advantages in image recognition. VGG16 and VGG19 captured texture and details, while ResNet50 and DenseNet series addressed deep network training challenges. EfficientNet models optimized accuracy with minimal computational cost, and InceptionResNetV2 and InceptionV3 improved learning efficiency. MobileNet variants and Xception achieve a balance between speed and accuracy, making them ideal for resource-limited applications.</p>
<p>We began by integrating and selecting a diverse array of proven pre-trained CNN architectures, each known for its distinct advantages in various aspects of image recognition. These architectures include:
<list list-type="simple">
<list-item>
<p><bold>VGG16 and VGG19</bold>: Renowned for their simplicity and deep architecture, which is particularly effective for capturing image texture and details.</p></list-item>
<list-item>
<p><bold>ResNet50 and DenseNet Series</bold> (<bold>DenseNet121, DenseNet169, DenseNet201</bold>): These models leverage residual and dense connections, respectively, to facilitate training deeper networks by alleviating the vanishing gradient problem.</p></list-item>
<list-item>
<p><bold>EfficientNet Series</bold> (<bold>EfficientNetB0</bold>&#x2014;<bold>EfficientNetB7</bold>): Known for scaling up CNNs in a more structured manner to achieve higher accuracy without high computational costs.</p></list-item>
<list-item>
<p><bold>InceptionResNetV2</bold> and <bold>InceptionV3</bold>: These models combine inception modules with residual connections to improve learning speed and accuracy.</p></list-item>
<list-item>
<p><bold>MobileNet Series</bold> (<bold>MobileNet, MobileNetV2, MobileNetV3 Large, MobileNetV3 Small</bold>) and <bold>Xception</bold>: Optimized for mobile devices, these models offer a good balance between speed and accuracy, making them suitable for applications where computational resources are limited.</p></list-item>
</list>Each of these pre-trained models is fine-tuned to adapt to our dental imaging dataset. This involves modifying the top layers of the network to focus on features specific to dental radiographs, such as tooth anatomy and the positioning of endodontic files.</p>
<p>To complement the broad learning capabilities of the pre-trained networks, we also developed a custom CNN architecture tailored to the nuanced requirements of endodontic radiographic analysis. The architecture of this custom model is carefully designed with several specialized layers, as demonstrated in <xref ref-type="fig" rid="F4">Figure&#x00A0;4</xref>; <xref ref-type="table" rid="T2">Table&#x00A0;2</xref>:
<list list-type="simple">
<list-item>
<p><bold>Convolutional Layers</bold>: Configured to detect fine-grained details critical for accurate working length determination.</p></list-item>
<list-item>
<p><bold>Depthwise Separable Convolutions</bold>: Implemented to provide efficient model scaling and detailed feature extraction without a significant increase in computational demand.</p></list-item>
<list-item>
<p><bold>Dilated Convolutions</bold>: Used to expand the receptive field of the network, allowing it to encompass broader contextual information without losing resolution.</p></list-item>
</list></p>
<fig id="F4" position="float"><label>Figure&#x00A0;4</label>
<caption><p>Detailed architecture of the custom-developed CNN model.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdmed-07-1730454-g004.tif"><alt-text content-type="machine-generated">Three-dimensional schematic of a convolutional neural network architecture resembling a tower structure, with color-coded blocks representing different layers such as input, convolution, batch normalization, ReLU, pooling, dropout, flatten, dense, and softmax, accompanied by a labeled legend.</alt-text>
</graphic>
</fig>
<table-wrap id="T2" position="float"><label>Table&#x00A0;2</label>
<caption><p>Layer-by-layer architecture of the custom-developed CNN model.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="center"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Type of the layer</th>
<th valign="top" align="center">Output configuration</th>
<th valign="top" align="center">Parameter characteristics</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Batch Normalization (batch normalization)</td>
<td valign="top" align="left">224&#x2009;&#x00D7;&#x2009;224 spatial resolution, 3 channels</td>
<td valign="top" align="left">Count: 12, Normalization parameters</td>
</tr>
<tr>
<td valign="top" align="left">2D Convolution (conv2d)</td>
<td valign="top" align="left">224&#x2009;&#x00D7;&#x2009;224 spatial resolution, 8 units</td>
<td valign="top" align="left">Count: 224, Feature detectors</td>
</tr>
<tr>
<td valign="top" align="left">2D Convolution (conv2d 1)</td>
<td valign="top" align="left">224&#x2009;&#x00D7;&#x2009;224 spatial resolution, 16 units</td>
<td valign="top" align="left">Count: 1,168, Feature detectors</td>
</tr>
<tr>
<td valign="top" align="left">Max Pooling (max pooling2d)</td>
<td valign="top" align="left">Halved resolution: 112&#x2009;&#x00D7;&#x2009;112, 16 units</td>
<td valign="top" align="left">None, Dimensionality reduction</td>
</tr>
<tr>
<td valign="top" align="left">2D Convolution (conv2d 2)</td>
<td valign="top" align="left">Halved resolution: 112&#x2009;&#x00D7;&#x2009;112, 32 units</td>
<td valign="top" align="left">Count: 4,640, Feature detectors</td>
</tr>
<tr>
<td valign="top" align="left">Max Pooling (max pooling2d 1)</td>
<td valign="top" align="left">Quartered resolution: 56&#x2009;&#x00D7;&#x2009;56, 32 units</td>
<td valign="top" align="left">None, Dimensionality reduction</td>
</tr>
<tr>
<td valign="top" align="left">2D Convolution (conv2d 3)</td>
<td valign="top" align="left">Quartered resolution: 56&#x2009;&#x00D7;&#x2009;56, 64 units</td>
<td valign="top" align="left">Count: 18,496, Feature detectors</td>
</tr>
<tr>
<td valign="top" align="left">2D Convolution (conv2d 4)</td>
<td valign="top" align="left">Quartered resolution: 56&#x2009;&#x00D7;&#x2009;56, 64 units</td>
<td valign="top" align="left">Count: 36,928, Feature detectors</td>
</tr>
<tr>
<td valign="top" align="left">Max Pooling (max pooling2d 2)</td>
<td valign="top" align="left">One-eighth resolution: 28&#x2009;&#x00D7;&#x2009;28, 64 units</td>
<td valign="top" align="left">None, Dimensionality reduction</td>
</tr>
<tr>
<td valign="top" align="left">2D Convolution (conv2d 5)</td>
<td valign="top" align="left">One-eighth resolution: 28&#x2009;&#x00D7;&#x2009;28, 256 units</td>
<td valign="top" align="left">Count: 147,712, Advanced feature extraction</td>
</tr>
<tr>
<td valign="top" align="left">Max Pooling (max pooling2d 3)</td>
<td valign="top" align="left">One-sixteenth resolution: 14&#x2009;&#x00D7;&#x2009;14, 256 units</td>
<td valign="top" align="left">None, Dimensionality reduction</td>
</tr>
<tr>
<td valign="top" align="left">Dropout (dropout)</td>
<td valign="top" align="left">Preserved resolution: 14&#x2009;&#x00D7;&#x2009;14, 256 units</td>
<td valign="top" align="left">None, Overfitting mitigation</td>
</tr>
<tr>
<td valign="top" align="left">Global Average Pooling (global average pooling2d)</td>
<td valign="top" align="left">Compressed feature representation: 256 units</td>
<td valign="top" align="left">None, Feature summarization</td>
</tr>
<tr>
<td valign="top" align="left">Flatten (flatten)</td>
<td valign="top" align="left">Vectorization of features: 256 units</td>
<td valign="top" align="left">None, Preparing for dense layers</td>
</tr>
<tr>
<td valign="top" align="left">Fully Connected (dense)</td>
<td valign="top" align="left">High-capacity feature processing: 1,024 units</td>
<td valign="top" align="left">Count: 263,168, Learning dense features</td>
</tr>
<tr>
<td valign="top" align="left">Dropout (dropout 1)</td>
<td valign="top" align="left">Feature selection: 1,024 units</td>
<td valign="top" align="left">None, Overfitting mitigation</td>
</tr>
<tr>
<td valign="top" align="left">Fully Connected (dense 1)</td>
<td valign="top" align="left">High-capacity feature processing: 1,024 units</td>
<td valign="top" align="left">Count: 1,049,600, Learning dense features</td>
</tr>
<tr>
<td valign="top" align="left">Dropout (dropout 2)</td>
<td valign="top" align="left">Feature selection: 1,024 units</td>
<td valign="top" align="left">None, Overfitting mitigation</td>
</tr>
<tr>
<td valign="top" align="left">Output Layer (dense 2)</td>
<td valign="top" align="left">Final decision layer: 3 units (class probabilities)</td>
<td valign="top" align="left">Count: 10,250, Classification decisions</td>
</tr>
<tr>
<td valign="top" align="left" style="background-color:#d9d9d9" colspan="3">Total parameters: 1,532,198, Comprising Trainable: 1,532,192, Non-trainable: 6</td>
</tr>
</tbody>
</table>
</table-wrap>
<p><xref ref-type="table" rid="T2">Table&#x00A0;2</xref> presents the detailed layer-by-layer architecture of the custom-developed CNN model shown in <xref ref-type="fig" rid="F4">Figure&#x00A0;4</xref>, which serves as the backbone of our WL image classification system. The initial batch normalization layer sets the stage for the network by normalizing the input distribution, which is essential for accelerating the training process.</p>
<p>Following this, a precisely designed sequence of 2D convolutional layers with increasing numbers of units is deployed to systematically enhance feature extraction capabilities. The strategic incorporation of max pooling layers systematically reduces the spatial dimensions of the feature maps, significantly reducing computational demands while maintaining the most salient features.</p>
<p>The employment of dropout layers interspersed among the high-capacity, fully connected dense layers functions as a regulatory mechanism to combat overfitting, ensuring that the model generalizes well to new, unseen data. Activation functions such as ReLU introduce nonlinearity, enabling the model to learn complex patterns. Furthermore, batch normalization layers are incorporated to enhance training stability, and dropout is strategically applied to prevent overfitting.</p>
<p>The final output layer, with its SoftMax activation, is calibrated to output three classes, translating the learned patterns into clinically relevant classifications of working length (optimum, over-extended, or under-extended). The proposed CNN architecture has 1.5 million parameters, balancing model complexity and interpretability, a balance paramount in medical image analysis.</p>
<p>By deploying this dual approach, combining the generalized learning capabilities of pre-trained models with the focused precision of a custom-developed architecture, we aim to create a robust system capable of high accuracy and reliability in classifying dental radiographic images.</p>
<p>This methodology not only leverages the strengths of both architectural paradigms but also ensures the models are finely tuned to meet the specific challenges posed by endodontic imaging tasks.</p>
</sec>
<sec id="s2d"><label>2.4</label><title>Fine-tuning and training</title>
<p>The fine-tuning and training of the selected and developed CNN models are critical stages in our study, during which we refine the models to ensure optimal performance on dental radiographic images. This phase is thoroughly designed to fine-tune both pre-trained and custom-developed models, enhancing their ability to accurately identify and classify the varied anatomical features and pathologies evident in endodontic imaging.</p>
<p>Fine-tuning is approached by unfreezing the top layers of the pre-trained models, which are the most likely to capture high-level features specific to our dental dataset. The parameters of these layers are then carefully adjusted through continued training on our dataset (<xref ref-type="bibr" rid="B49">49</xref>). We modify:
<list list-type="simple">
<list-item>
<p><bold>Learning Rates</bold>: Initially set lower than usual to prevent the loss of previously learned features. The learning rate is progressively increased using a cyclical policy, which helps escape local minima and explore a broader region of the solution space.</p></list-item>
<list-item>
<p><bold>Layer Configurations</bold>: Depending on the specific model architecture, certain layers may be added or removed to better capture the nuances of dental images. For instance, additional convolutional layers may be added to deepen the model&#x0027;s ability to process fine details, or fully connected layers may be adjusted to refine the output towards the specific classes of interest.</p></list-item>
</list>Once the models are fine-tuned, they undergo a rigorous training regime using the following advanced techniques to enhance their generalizability and robustness:
<list list-type="simple">
<list-item>
<p><bold>Dropout</bold>: Applied randomly to neurons in the dense layers during training to prevent overfitting by reducing interdependencies among neurons. This method effectively improves the model&#x0027;s ability to generalize from the training data to unseen data.</p></list-item>
<list-item>
<p><bold>Batch Normalization</bold>: Implemented after every convolutional layer to normalize the activations and accelerate the training process. This technique also helps stabilize the neural network by normalizing the input layer by adjusting the mean and variance.</p></list-item>
</list>Both dropout and batch normalization are pivotal for managing internal covariate shift, thereby speeding up training and improving the model&#x0027;s performance during inference on new, unseen images.</p>
<p>Training is conducted in batches, with batch size accurately chosen based on the computational capabilities and the specific architecture being trained. Gradient descent algorithms, specifically Adam or Stochastic Gradient Descent (SGD) with momentum, are utilized to update the weights. These optimizers are selected for their efficiency in handling sparse gradients and robustness in diverse training landscape conditions.</p>
<p>Throughout the training process, the model&#x0027;s performance is continuously monitored using a validation set. This not only ensures the model is not overfit with the training data but also provides insights into how it might perform in a real-world clinical setting. Based on the validation results, the training parameters are iteratively refined to further enhance accuracy and reliability.</p>
<p><xref ref-type="table" rid="T3">Table&#x00A0;3</xref> presents the fine-tuned configuration parameters used to train the convolutional neural networks described in this work. It highlights the software environment and tools, such as Python with TensorFlow and Keras, and emphasizes the advanced computational framework employed. The data is divided into training, validation, and test sets at 80&#x0025;, 10&#x0025;, and 10&#x0025;, respectively, facilitating robust model training and evaluation. A learning rate of 0.0001, combined with the Adam optimization algorithm, ensures efficient convergence during training. Regularization is handled via an L2 penalty with a decay rate of 0.001 to prevent overfitting, thereby improving the models&#x2019; generalization. The networks are trained for 128 epochs with a minimum batch size of 64, using a categorical cross-entropy loss function, which is optimal for the multi-class classification problems inherent to this study.</p>
<table-wrap id="T3" position="float"><label>Table&#x00A0;3</label>
<caption><p>Detailed fine-tuned configuration parameters for CNN model training.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Parameter</th>
<th valign="top" align="center">Specification</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Development language</td>
<td valign="top" align="center">Python</td>
</tr>
<tr>
<td valign="top" align="left">Neural network libraries</td>
<td valign="top" align="center">TensorFlow, Keras</td>
</tr>
<tr>
<td valign="top" align="left">Data split (Train/Validate/Test)</td>
<td valign="top" align="center">80&#x0025;/10&#x0025;/10&#x0025;</td>
</tr>
<tr>
<td valign="top" align="left">Learning rate</td>
<td valign="top" align="center">0.0001</td>
</tr>
<tr>
<td valign="top" align="left">Optimizer</td>
<td valign="top" align="center">Adam</td>
</tr>
<tr>
<td valign="top" align="left">Regularization technique</td>
<td valign="top" align="center">L2 regularization</td>
</tr>
<tr>
<td valign="top" align="left">L2 regularization rate</td>
<td valign="top" align="center">0.001</td>
</tr>
<tr>
<td valign="top" align="left">Training epochs</td>
<td valign="top" align="center">128</td>
</tr>
<tr>
<td valign="top" align="left">Batch size</td>
<td valign="top" align="center">64</td>
</tr>
<tr>
<td valign="top" align="left">Loss computation</td>
<td valign="top" align="center">Categorical cross-entropy</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>This comprehensive fine-tuning and training process is essential for optimizing the performance of our CNN models, ensuring they are not only theoretically sound but also practically viable and effective in a clinical environment.</p>
</sec>
<sec id="s2e"><label>2.5</label><title>CNN models&#x2019; evaluation and classification process</title>
<p>The evaluation process for CNN models assesses their accuracy and reliability to determine their effectiveness and readiness for deployment in clinical settings. The 22 CNN models were comprehensively evaluated using 11 different metrics that are listed in <xref ref-type="table" rid="T4">Table&#x00A0;4</xref>. The model with the best performance was then plugged into a web-based tool to offer a system with high accuracy, reliability, and practical usability in clinical applications.</p>
<table-wrap id="T4" position="float"><label>Table&#x00A0;4</label>
<caption><p>The detailed evaluation metrics and their definitions.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Evaluation metric</th>
<th valign="top" align="center">Definition</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Accuracy</td>
<td valign="top" align="left">Measures the overall correctness of the model across all classes, giving us a straightforward indication of performance</td>
</tr>
<tr>
<td valign="top" align="left">Precision (Positive Predictive Value)</td>
<td valign="top" align="left">&#x00A0;Indicates the accuracy of positive predictions for each class, crucial for applications where the cost of a false positive is high.</td>
</tr>
<tr>
<td valign="top" align="left">Recall (Sensitivity or True Positive Rate):</td>
<td valign="top" align="left">Measures the model&#x0027;s ability to detect all positive instances, which is essential in clinical settings where missing a positive case can have profound implications.</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="left">Harmonizes precision and recall into a single metric, balancing the trade-offs between them and providing a more comprehensive overview of model performance.</td>
</tr>
<tr>
<td valign="top" align="left">Testing time</td>
<td valign="top" align="left">Indicates the efficiency of the model, ensuring the model can be integrated smoothly to provide output without significant delays.</td>
</tr>
<tr>
<td valign="top" align="left">Specificity (True negative rate)</td>
<td valign="top" align="left">Measures the proportion of actual negatives that are correctly identified as such, necessary for confirming the absence of a condition.</td>
</tr>
<tr>
<td valign="top" align="left">Negative predictive value (NVP)</td>
<td valign="top" align="left">Indicates the likelihood that a negative test truly means the absence of a condition, which is valuable in ensuring the reassurance of negative screening results.</td>
</tr>
<tr>
<td valign="top" align="left">False positive rate (FPR)</td>
<td valign="top" align="left">Indicates the probability of falsely classifying a negative case as positive, critical in areas where false alarms are costly.</td>
</tr>
<tr>
<td valign="top" align="left">False negative rate (FNR)</td>
<td valign="top" align="left">Represents the probability of falsely classifying a positive case as negative, which is crucial in settings where missing a condition could be detrimental.</td>
</tr>
<tr>
<td valign="top" align="left">False discovery rate (FDR)</td>
<td valign="top" align="left">Measures the proportion of false positives in all positive predictions, which is essential for understanding the reliability of positive test results.</td>
</tr>
<tr>
<td valign="top" align="left">False omission rate (FOR)</td>
<td valign="top" align="left">Measures the proportion of false negatives in all negative predictions, relevant in scenarios where negatives carry significant implications.</td>
</tr>
<tr>
<td valign="top" align="left">Misclassification rate (MR)</td>
<td valign="top" align="left">Measures the overall rate at which the model incorrectly classifies instances, giving an overall error rate of the system.</td>
</tr>
</tbody>
</table>
</table-wrap>
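<p>To make these definitions concrete, the following minimal sketch (assuming NumPy) computes the tabulated metrics for a single class from a multi-class confusion matrix, treating that class one-vs-rest. This mirrors how per-class values, such as those reported later for the three WL categories, can be derived.</p>
<preformat>
# Per-class metrics from a multi-class confusion matrix
# (rows = true classes, columns = predicted classes), one-vs-rest.
import numpy as np

def per_class_metrics(cm, i):
    cm = np.asarray(cm, dtype=float)
    tp = cm[i, i]
    fn = cm[i].sum() - tp        # class-i samples predicted as others
    fp = cm[:, i].sum() - tp     # other samples predicted as class i
    tn = cm.sum() - tp - fn - fp
    return {
        "accuracy": (tp + tn) / cm.sum(),
        "precision (PPV)": tp / (tp + fp),
        "recall (TPR)": tp / (tp + fn),
        "F1-score": 2 * tp / (2 * tp + fp + fn),
        "specificity (TNR)": tn / (tn + fp),
        "NPV": tn / (tn + fn),
        "FPR": fp / (fp + tn),
        "FNR": fn / (fn + tp),
        "FDR": fp / (fp + tp),
        "FOR": fn / (fn + tn),
        "misclassification rate (MR)": (fp + fn) / cm.sum(),
    }
</preformat>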
</sec>
</sec>
<sec id="s3"><label>3</label><title>Results and analysis</title>
<p>This section presents a comprehensive analysis of the outcomes of the detailed evaluation of the CNN models developed to classify working lengths in dental radiographs. Key performance metrics, including accuracy, F1-score, precision, and recall, were computed for all examined models, along with testing time to assess operational efficiency. An in-depth detection assessment was conducted on the most accurate custom-developed CNN model, encompassing all relevant evaluation parameters across the three designated WL categories. In terms of precision, the DCNN model achieved the highest score of 97.07&#x0025;, closely followed by EfficientNetB5 and VGG16, architectures known for their efficiency in image-recognition tasks. Recall, critical for ensuring correct classification, was also highest for the DCNN model, at 97.03&#x0025;. The DCNN model excelled in both accuracy and operational efficiency, achieving a test time of only 0.548546&#x2005;s. EfficientNetB5, VGG16, EfficientNetB1, VGG19, and EfficientNetB2 also achieved commendable accuracy, with scores above 95&#x0025;, underscoring the efficacy of advanced CNN architectures in handling complex image data. <xref ref-type="table" rid="T5">Table&#x00A0;5</xref> presents the detailed detection assessment parameters for each examined AI model.</p>
<table-wrap id="T5" position="float"><label>Table&#x00A0;5</label>
<caption><p>Detection assessment parameters of the examined AI models.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">AI model</th>
<th valign="top" align="center">Accuracy</th>
<th valign="top" align="center">F1-score</th>
<th valign="top" align="center">Precision</th>
<th valign="top" align="center">Recall</th>
<th valign="top" align="center">Testing time (sec)</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">DCNN (Scratch)</td>
<td valign="top" align="center"><bold>0.970297</bold></td>
<td valign="top" align="center"><bold>0.970390</bold></td>
<td valign="top" align="center"><bold>0.970702</bold></td>
<td valign="top" align="center"><bold>0.970297</bold></td>
<td valign="top" align="center"><bold>0.548546</bold></td>
</tr>
<tr>
<td valign="top" align="left">EfficientNetB5</td>
<td valign="top" align="center">0.960396</td>
<td valign="top" align="center">0.960613</td>
<td valign="top" align="center">0.961948</td>
<td valign="top" align="center">0.960396</td>
<td valign="top" align="center">6.703499</td>
</tr>
<tr>
<td valign="top" align="left">VGG16</td>
<td valign="top" align="center">0.957096</td>
<td valign="top" align="center">0.957219</td>
<td valign="top" align="center">0.957729</td>
<td valign="top" align="center">0.957096</td>
<td valign="top" align="center">7.026967</td>
</tr>
<tr>
<td valign="top" align="left">EfficientNetB1</td>
<td valign="top" align="center">0.957096</td>
<td valign="top" align="center">0.957182</td>
<td valign="top" align="center">0.957879</td>
<td valign="top" align="center">0.957095</td>
<td valign="top" align="center">2.888323</td>
</tr>
<tr>
<td valign="top" align="left">VGG19</td>
<td valign="top" align="center">0.950495</td>
<td valign="top" align="center">0.950694</td>
<td valign="top" align="center">0.951500</td>
<td valign="top" align="center">0.950495</td>
<td valign="top" align="center">9.019332</td>
</tr>
<tr>
<td valign="top" align="left">EfficientNetB2</td>
<td valign="top" align="center">0.953795</td>
<td valign="top" align="center">0.953937</td>
<td valign="top" align="center">0.954361</td>
<td valign="top" align="center">0.953795</td>
<td valign="top" align="center">3.007454</td>
</tr>
<tr>
<td valign="top" align="left">EfficientNetB7</td>
<td valign="top" align="center">0.947195</td>
<td valign="top" align="center">0.947434</td>
<td valign="top" align="center">0.947956</td>
<td valign="top" align="center">0.947195</td>
<td valign="top" align="center">12.19628</td>
</tr>
<tr>
<td valign="top" align="left">MobileNetV3Large</td>
<td valign="top" align="center">0.947195</td>
<td valign="top" align="center">0.947207</td>
<td valign="top" align="center">0.948072</td>
<td valign="top" align="center">0.947195</td>
<td valign="top" align="center">1.444528</td>
</tr>
<tr>
<td valign="top" align="left">EfficientNetB3</td>
<td valign="top" align="center">0.940594</td>
<td valign="top" align="center">0.940899</td>
<td valign="top" align="center">0.941771</td>
<td valign="top" align="center">0.940594</td>
<td valign="top" align="center">3.825811</td>
</tr>
<tr>
<td valign="top" align="left">EfficientNetB6</td>
<td valign="top" align="center">0.943894</td>
<td valign="top" align="center">0.943810</td>
<td valign="top" align="center">0.944299</td>
<td valign="top" align="center">0.943894</td>
<td valign="top" align="center">9.057463</td>
</tr>
<tr>
<td valign="top" align="left">ResNet50</td>
<td valign="top" align="center">0.930693</td>
<td valign="top" align="center">0.931268</td>
<td valign="top" align="center">0.933418</td>
<td valign="top" align="center">0.930693</td>
<td valign="top" align="center">4.158439</td>
</tr>
<tr>
<td valign="top" align="left">MobileNet</td>
<td valign="top" align="center">0.927393</td>
<td valign="top" align="center">0.927840</td>
<td valign="top" align="center">0.928913</td>
<td valign="top" align="center">0.927393</td>
<td valign="top" align="center">1.177610</td>
</tr>
<tr>
<td valign="top" align="left">EfficientNetB0</td>
<td valign="top" align="center">0.927393</td>
<td valign="top" align="center">0.927491</td>
<td valign="top" align="center">0.928669</td>
<td valign="top" align="center">0.927393</td>
<td valign="top" align="center">2.072861</td>
</tr>
<tr>
<td valign="top" align="left">DenseNet121</td>
<td valign="top" align="center">0.927393</td>
<td valign="top" align="center">0.927351</td>
<td valign="top" align="center">0.927517</td>
<td valign="top" align="center">0.927392</td>
<td valign="top" align="center">4.216988</td>
</tr>
<tr>
<td valign="top" align="left">EfficientNetB4</td>
<td valign="top" align="center">0.927393</td>
<td valign="top" align="center">0.927691</td>
<td valign="top" align="center">0.928396</td>
<td valign="top" align="center">0.927393</td>
<td valign="top" align="center">4.960827</td>
</tr>
<tr>
<td valign="top" align="left">MobileNetV3Small</td>
<td valign="top" align="center">0.920792</td>
<td valign="top" align="center">0.920765</td>
<td valign="top" align="center">0.921505</td>
<td valign="top" align="center">0.920792</td>
<td valign="top" align="center">0.878270</td>
</tr>
<tr>
<td valign="top" align="left">DenseNet169</td>
<td valign="top" align="center">0.920792</td>
<td valign="top" align="center">0.916644</td>
<td valign="top" align="center">0.922791</td>
<td valign="top" align="center">0.920792</td>
<td valign="top" align="center">5.148977</td>
</tr>
<tr>
<td valign="top" align="left">DenseNet201</td>
<td valign="top" align="center">0.920792</td>
<td valign="top" align="center">0.920544</td>
<td valign="top" align="center">0.922299</td>
<td valign="top" align="center">0.920792</td>
<td valign="top" align="center">6.699370</td>
</tr>
<tr>
<td valign="top" align="left">Xception</td>
<td valign="top" align="center">0.910891</td>
<td valign="top" align="center">0.910857</td>
<td valign="top" align="center">0.911724</td>
<td valign="top" align="center">0.910891</td>
<td valign="top" align="center">3.529581</td>
</tr>
<tr>
<td valign="top" align="left">InceptionV3</td>
<td valign="top" align="center">0.891089</td>
<td valign="top" align="center">0.891423</td>
<td valign="top" align="center">0.892822</td>
<td valign="top" align="center">0.891089</td>
<td valign="top" align="center">2.156583</td>
</tr>
<tr>
<td valign="top" align="left">MobileNetV2</td>
<td valign="top" align="center">0.864686</td>
<td valign="top" align="center">0.864373</td>
<td valign="top" align="center">0.868220</td>
<td valign="top" align="center">0.864686</td>
<td valign="top" align="center">1.380209</td>
</tr>
<tr>
<td valign="top" align="left">InceptionResNetV2</td>
<td valign="top" align="center">0.617161</td>
<td valign="top" align="center">0.614039</td>
<td valign="top" align="center">0.614189</td>
<td valign="top" align="center">0.617161</td>
<td valign="top" align="center">5.616386</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>The custom-developed deep CNN (DCNN) (Scratch) model achieved superior performance, with an accuracy of 97.03&#x0025; and an F1-score of 97.04&#x0025;, indicating highly reliable classification. <xref ref-type="table" rid="T6">Table&#x00A0;6</xref> outlines the detection assessment parameters for the DCNN model for the optimum, over-extended, and under-extended WL categories.</p>
<table-wrap id="T6" position="float"><label>Table&#x00A0;6</label>
<caption><p>Detailed detection assessment parameters of the most accurate DCNN model across the three examined working length samples.</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="center"/>
<col align="center"/>
<col align="center"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left">Metric</th>
<th valign="top" align="center">Optimum WL</th>
<th valign="top" align="center">Over-extended WL</th>
<th valign="top" align="center">Under-extended WL</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Acc.</td>
<td valign="top" align="center">0.970297</td>
<td valign="top" align="center">0.990099</td>
<td valign="top" align="center">0.980198</td>
</tr>
<tr>
<td valign="top" align="left">TNR</td>
<td valign="top" align="center">0.970297</td>
<td valign="top" align="center">0.995050</td>
<td valign="top" align="center">0.990099</td>
</tr>
<tr>
<td valign="top" align="left">NPV</td>
<td valign="top" align="center">0.984925</td>
<td valign="top" align="center">0.990147</td>
<td valign="top" align="center">0.980392</td>
</tr>
<tr>
<td valign="top" align="left">FPR</td>
<td valign="top" align="center">0.029703</td>
<td valign="top" align="center">0.004950</td>
<td valign="top" align="center">0.009901</td>
</tr>
<tr>
<td valign="top" align="left">FNR</td>
<td valign="top" align="center">0.029702</td>
<td valign="top" align="center">0.019801</td>
<td valign="top" align="center">0.039604</td>
</tr>
<tr>
<td valign="top" align="left">PPV</td>
<td valign="top" align="center">0.942307</td>
<td valign="top" align="center">0.990000</td>
<td valign="top" align="center">0.979798</td>
</tr>
<tr>
<td valign="top" align="left">FDR</td>
<td valign="top" align="center">0.057692</td>
<td valign="top" align="center">0.010000</td>
<td valign="top" align="center">0.020202</td>
</tr>
<tr>
<td valign="top" align="left">TPR</td>
<td valign="top" align="center">0.970297</td>
<td valign="top" align="center">0.980198</td>
<td valign="top" align="center">0.960396</td>
</tr>
<tr>
<td valign="top" align="left">FOR</td>
<td valign="top" align="center">0.015075</td>
<td valign="top" align="center">0.009852</td>
<td valign="top" align="center">0.019608</td>
</tr>
<tr>
<td valign="top" align="left">F1-score</td>
<td valign="top" align="center">0.956097</td>
<td valign="top" align="center">0.985074</td>
<td valign="top" align="center">0.970000</td>
</tr>
<tr>
<td valign="top" align="left">MR</td>
<td valign="top" align="center">0.029703</td>
<td valign="top" align="center">0.009900</td>
<td valign="top" align="center">0.019802</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Additionally, to conduct a deep analysis, we evaluated the performance of the top six CNN models based on their accuracy and loss curves, and the precision of their classification capabilities as depicted in their confusion matrices. These analyses provide insights into the models&#x2019; abilities to learn and generalize from the training data, as well as their precision in classifying working lengths.</p>
<p>The accuracy and loss curves, illustrated in <xref ref-type="fig" rid="F5">Figure&#x00A0;5 [(a)&#x2013;(f)]</xref>, are key indicators of model performance over the training epochs for the six top-performing models: DCNN, EfficientNetB5, VGG16, EfficientNetB1, VGG19, and EfficientNetB2.</p>
<fig id="F5" position="float"><label>Figure&#x00A0;5</label>
<caption><p>Accuracy and loss curves for the top six most accurate AI models.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdmed-07-1730454-g005.tif"><alt-text content-type="machine-generated">Six line graphs compare training and validation accuracy and loss across epochs for DCNN, EfficientNetB5, VGG16, EfficientNetB1, VGG19, and EfficientNetB2 models. Each plot shows four curves: train-acc, val-acc, train-loss, val-loss. Training accuracy rises while training loss falls for all models; validation accuracy and loss stabilize, with validation loss generally remaining above training loss for each model.</alt-text>
</graphic>
</fig>
<p>In <xref ref-type="fig" rid="F5">Figure&#x00A0;5(a)</xref>, a slight increase in the validation loss is observed after approximately epoch 40. This corresponds to the inflection point where the DCNN achieves its optimal generalization. Beyond this stage, further minimization of the training objective can lead to mild overfitting, as the learned feature representations become increasingly tailored to the training data. Moreover, because categorical cross-entropy is highly sensitive to prediction confidence, small variations in probability estimates for challenging validation instances can cause the loss value to rise without a corresponding decrease in classification accuracy. To ensure optimal performance, the system used a model checkpointing approach, saving and loading the best parameters identified before this trend emerged, thereby maintaining the stability and reliability reported in the final evaluation metrics.</p>
<p>Notably, the DCNN model exhibits rapid stabilization in accuracy and a corresponding decrease in loss, achieving both high training accuracy and low validation loss early in training. This suggests an effective learning process with minimal overfitting. EfficientNetB5 and VGG16 also show exemplary convergence behavior, indicating robustness in their learning mechanisms.</p>
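<p>For reference, curves such as those in <xref ref-type="fig" rid="F5">Figure&#x00A0;5</xref> can be reproduced from the Keras training history; the sketch below assumes Matplotlib and the default Keras history keys.</p>
<preformat>
# Plot training/validation accuracy and loss from a Keras History
# object (key names follow Keras defaults; purely illustrative).
import matplotlib.pyplot as plt

def plot_curves(history, title):
    h = history.history
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
    ax1.plot(h["accuracy"], label="train-acc")
    ax1.plot(h["val_accuracy"], label="val-acc")
    ax1.set(title=title, xlabel="epoch", ylabel="accuracy")
    ax1.legend()
    ax2.plot(h["loss"], label="train-loss")
    ax2.plot(h["val_loss"], label="val-loss")
    ax2.set(title=title, xlabel="epoch", ylabel="loss")
    ax2.legend()
    fig.tight_layout()
    plt.show()
</preformat>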
<p><xref ref-type="fig" rid="F6">Figure&#x00A0;6 [(a)&#x2013;(f)]</xref> displays the confusion matrices for the same subset of top-performing CNN models, offering a breakdown of their predictive performance across the three classified working lengths: optimum, over-extended, and under-extended. The matrices show high true-positive rates across all six CNN models, with the DCNN achieving near-perfect classification across all categories. The EfficientNetB5 model shows a slight misclassification between the optimal and over-extended categories, suggesting opportunities to improve at distinguishing closely related classes.</p>
<fig id="F6" position="float"><label>Figure&#x00A0;6</label>
<caption><p>Confusion matrices for the top six most accurate AI models.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdmed-07-1730454-g006.tif"><alt-text content-type="machine-generated">Six confusion matrix heatmaps compare classification results for three classes&#x2014;Optimum, OverExtended, and UnderExtended&#x2014;across six models: DCNN (Scratch), EfficientNetB5, VGG16, EfficientNetB1, VGG19, and EfficientNetB2. Each matrix presents class prediction accuracy, misclassification frequencies, and color-coded intensity, with most correct predictions on the diagonal and few errors off-diagonal.</alt-text>
</graphic>
</fig>
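<p>A confusion matrix such as those in <xref ref-type="fig" rid="F6">Figure&#x00A0;6</xref> can be assembled from held-out test labels and model predictions; the following sketch assumes scikit-learn is available and that the three WL classes are encoded as integer labels.</p>
<preformat>
# Build the 3x3 confusion matrix for the three WL classes from test
# data (the 0/1/2 label ordering here is an assumption for illustration).
import numpy as np
from sklearn.metrics import confusion_matrix

def wl_confusion_matrix(model, test_images, y_true):
    y_pred = np.argmax(model.predict(test_images), axis=-1)
    return confusion_matrix(y_true, y_pred, labels=[0, 1, 2])
</preformat>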
</sec>
<sec id="s4"><label>4</label><title>Web-based smart WL feedback system: case study</title>
<p>The best AI model was integrated into a web-based service that allows students to submit their work (a WL radiograph) and receive prompt, constructive feedback. For instance, consider three different inputs from three students, as shown in <xref ref-type="fig" rid="F7">Figure&#x00A0;7</xref>. The feedback from our automated smart system was as follows (a sketch of the underlying class-to-feedback mapping appears after the list):
<list list-type="simple">
<list-item>
<p>For (<bold>code 0</bold>), <xref ref-type="fig" rid="F7">Figure&#x00A0;7A</xref>, the system classifies it as an optimal position since the file tip is within 2 millimeters (mm) of the radiographic apex. The student is then instructed to proceed to the next step, which involves chemo-mechanical root canal preparation.</p></list-item>
<list-item>
<p>For (<bold>code 1</bold>), <xref ref-type="fig" rid="F7">Figure&#x00A0;7B</xref>, the system classifies it as under-extended since the file tip falls short of the radiographic apex by more than 2&#x2005;mm. The student is then instructed to adjust the file length by increasing it, re-inserting it, and taking a new radiograph.</p></list-item>
<list-item>
<p>For (<bold>code 2</bold>), <xref ref-type="fig" rid="F7">Figure&#x00A0;7C</xref>, the system classifies it as over-extended since the file tip extends beyond the radiographic apex by more than 2&#x2005;mm. The student is then instructed to adjust the file length by reducing it, re-inserting it, and taking a new radiograph.</p></list-item>
</list></p>
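<p>A minimal sketch of this class-to-feedback mapping follows; the function and variable names are hypothetical, and the message wording paraphrases the three cases above.</p>
<preformat>
# Map the model's predicted class code to a student feedback message
# (codes follow the case study: 0 = optimal, 1 = under-extended,
# 2 = over-extended; all names here are illustrative).
import numpy as np

FEEDBACK = {
    0: "Optimal: file tip within 2 mm of the radiographic apex. "
       "Proceed to chemo-mechanical root canal preparation.",
    1: "Under-extended: file tip short of the apex by more than 2 mm. "
       "Increase the file length, re-insert, and take a new radiograph.",
    2: "Over-extended: file tip beyond the apex by more than 2 mm. "
       "Reduce the file length, re-insert, and take a new radiograph.",
}

def feedback_for(model, radiograph_batch):
    probs = model.predict(radiograph_batch)           # shape (1, 3)
    return FEEDBACK[int(np.argmax(probs, axis=-1)[0])]
</preformat>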
<fig id="F7" position="float"><label>Figure&#x00A0;7</label>
<caption><p><bold>(A)</bold> Case study student&#x0027;s submission of optimal WL radiograph to the AI tool and the feedback received promptly. <bold>(B)</bold> Case study student&#x0027;s submission of underextended WL radiograph to the AI tool and the feedback received promptly. <bold>(C)</bold> Case study student&#x0027;s submission of overextended WL radiograph to the AI tool and the feedback received promptly.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="fdmed-07-1730454-g007.tif"><alt-text content-type="machine-generated">Dental radiograph showing a single tooth with an endodontic file inserted in the root canal, alongside an analysis panel stating the result as optimal and recommending step-back technique for apical preparation. A dental radiograph displays a tooth with an endodontic file that does not reach the root apex. The analysis panel on the right indicates an under-extended result, recommending increasing file length and repeating the X-ray. Dental X-ray analysis interface shows a WL radiograph on the left and an automated result on the right indicating \x93Over-extended.\x94 Feedback recommends file length adjustment as the file tip exceeds 2 millimeters beyond the apex.</alt-text>
</graphic>
</fig>
<p>Thirty dental students completed the survey evaluating the AI-assisted laboratory feedback tool. These 30 students, representing approximately 10&#x0025; of the total enrolled pre-clinical cohort, were selected for piloting purposes.</p>
<p>The sampling frame included all pre-clinical students at the end of their laboratory training period. Inclusion criteria were A) completion of the pre-clinical endodontic laboratory course, and B) willingness to test the web-based tool. Exclusion criteria included: A) incomplete laboratory attendance, or B) prior participation in tool development. No compensation or incentives were offered; participation was voluntary.</p>
<p>The overall mean rating was 4.67/5, indicating strong satisfaction. The highest-rated item was &#x2018;<italic>Instructions for using the tool were clear and sufficient</italic>&#x2019; (mean&#x2009;&#x003D;&#x2009;4.74). Ratings for the remaining survey items are listed in <xref ref-type="table" rid="T7">Table&#x00A0;7</xref>. Because Likert data are ordinal, medians were also examined: the median score across all items was 5.0, with item-level interquartile ranges spanning 4&#x2013;5 to 5&#x2013;5.</p>
<table-wrap id="T7" position="float"><label>Table&#x00A0;7</label>
<caption><p>Summary of student responses to the AI-assisted feedback tool (<italic>n</italic>&#x2009;&#x003D;&#x2009;30).</p></caption>
<table>
<colgroup>
<col align="left"/>
<col align="center"/>
<col align="left"/>
</colgroup>
<thead>
<tr>
<th valign="top" align="left" colspan="3">Qualitative study results analysis</th>
</tr>
<tr>
<th valign="top" align="left">Statement</th>
<th valign="top" align="center">Mean&#x2009;&#x00B1;&#x2009;SD</th>
<th valign="top" align="center">Interpretation</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">The tool was easy to set up and use</td>
<td valign="top" align="center">4.71&#x2009;&#x00B1;&#x2009;0.46</td>
<td valign="top" align="left">Strongly agree</td>
</tr>
<tr>
<td valign="top" align="left">Instructions for using the tool were clear and sufficient</td>
<td valign="top" align="center">4.74&#x2009;&#x00B1;&#x2009;0.44</td>
<td valign="top" align="left">Strongly agree</td>
</tr>
<tr>
<td valign="top" align="left">The tool functioned reliably without technical difficulties</td>
<td valign="top" align="center">4.68&#x2009;&#x00B1;&#x2009;0.47</td>
<td valign="top" align="left">Strongly agree</td>
</tr>
<tr>
<td valign="top" align="left">Using the tool allowed me to complete the laboratory tasks efficiently</td>
<td valign="top" align="center">4.55&#x2009;&#x00B1;&#x2009;0.51</td>
<td valign="top" align="left">Agree</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>Internal consistency of the questionnaire was assessed using Cronbach&#x0027;s alpha, which demonstrated excellent reliability (<italic>&#x03B1;</italic>&#x2009;&#x003D;&#x2009;0.96).</p>
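<p>For transparency, Cronbach&#x0027;s alpha can be computed from the respondents-by-items score matrix as in the following sketch; NumPy is assumed and the variable names are illustrative.</p>
<preformat>
# Cronbach's alpha for a (respondents x items) matrix of Likert scores:
# alpha = k/(k-1) * (1 - sum of item variances / variance of totals)
import numpy as np

def cronbach_alpha(scores):
    scores = np.asarray(scores, dtype=float)    # shape (30, 10) here
    k = scores.shape[1]                         # number of items
    item_vars = scores.var(axis=0, ddof=1)      # per-item sample variance
    total_var = scores.sum(axis=1).var(ddof=1)  # variance of total scores
    return (k / (k - 1)) * (1 - item_vars.sum() / total_var)
</preformat>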
<p>The questionnaire consisted of 10 items covering usability, clarity of instructions, reliability, feedback quality, and perceived learning enhancement. Items were scored on a five-point Likert scale (1&#x2009;&#x003D;&#x2009;strongly disagree to 5&#x2009;&#x003D;&#x2009;strongly agree). Although this was a custom instrument tailored to the WL determination context, a standardized instrument such as the SUS or UMUX-Lite will be considered in future work for broader comparability.</p>
<p>Sixteen female and fourteen male students participated in the survey. The web-based tool is hosted on a web server, and its link was shared with the relevant parties. Students consistently agreed that the tool was easy to use, provided clear and timely feedback, and supported efficient task completion. Overall, the tool was perceived as reliable, user-friendly, and beneficial for enhancing learning in endodontic laboratory training.</p>
</sec>
<sec id="s5"><label>5</label><title>Discussions</title>
<p>In Endodontics, AI is increasingly integrated into the technical assessment of root canal treatment through radiographic examination (<xref ref-type="bibr" rid="B19">19</xref>, <xref ref-type="bibr" rid="B26">26</xref>, <xref ref-type="bibr" rid="B36">36</xref>, <xref ref-type="bibr" rid="B50">50</xref>, <xref ref-type="bibr" rid="B51">51</xref>). Stages such as WL determination are critical steps in root canal treatment and directly affect its success (<xref ref-type="bibr" rid="B6">6</xref>, <xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B32">32</xref>). Providing personalized feedback to each case during endodontic training can be challenging, but it is essential for enhancing effective learning, especially in large classrooms (<xref ref-type="bibr" rid="B33">33</xref>, <xref ref-type="bibr" rid="B52">52</xref>, <xref ref-type="bibr" rid="B53">53</xref>).</p>
<p>This experimental study aimed to significantly enhance the precision and reliability of endodontic pre-clinical training by integrating AI machine-learning models with dental radiography. The proposed approach incorporated 22 CNN models, both pre-trained and custom-developed.</p>
<p>The efficiency of clinical training in dental education, specifically within the context of endodontic training protocols, is fundamentally contingent upon accurately determining working length (WL) (<xref ref-type="bibr" rid="B4">4</xref>, <xref ref-type="bibr" rid="B6">6</xref>, <xref ref-type="bibr" rid="B7">7</xref>). Leveraging advanced CNN capabilities, this study introduces an AI-driven framework that enhances the precision of these measurements.</p>
<p>This precise labeling protocol was critical for training the CNN models, as it encapsulated the range of scenarios a dental practitioner may encounter, thereby enhancing model accuracy and reliability. A well-balanced and refined dataset of 3,000 samples was created, with care taken to maintain consistent image quality and resolution, ensuring each radiograph was suitable for detailed analysis and machine-learning applications. The categorization of radiographic WL as optimal, under-extended, or over-extended followed the recommendations for radiographic WL measurement in the literature (<xref ref-type="bibr" rid="B6">6</xref>&#x2013;<xref ref-type="bibr" rid="B8">8</xref>). In addition, the distribution of images was balanced across the three categories to avoid dataset bias and enhance the validity of the AI-based learning process.</p>
<p>The DCNN model excelled in both accuracy and operational efficiency, achieving a test time of only 0.5&#x2005;s. This rapid testing capability is advantageous for real-time applications, enabling the model to be integrated smoothly and to provide output without significant delays. Its high recall is equally important in clinical settings, where missing or delaying diagnostic information could lead to poor procedural outcomes.</p>
<p>This high accuracy and balanced F1 score suggest that the model effectively manages the trade-off between precision and recall, which is crucial for medical applications where false positives and false negatives carry significant consequences.</p>
<p>The accuracy of the DCNN AI-driven model developed in this study (reaching up to 99&#x0025; per WL category) exceeded the accuracy of other AI-driven models exploring WL determination (ranging from 93&#x0025; to 96&#x0025;) (<xref ref-type="bibr" rid="B29">29</xref>, <xref ref-type="bibr" rid="B30">30</xref>, <xref ref-type="bibr" rid="B54">54</xref>). This can be attributed to the integration of both empirical and advanced AI techniques in this study, together with the robust and large dataset used to train the model. The case-study trial of the model demonstrated effective feedback for the three main categories of WL radiographs, indicating that automated, accurate, and quick feedback can be provided to students, assisting and supporting instructors in offering constructive feedback during this critical step (<xref ref-type="bibr" rid="B33">33</xref>, <xref ref-type="bibr" rid="B55">55</xref>, <xref ref-type="bibr" rid="B56">56</xref>). The AI-based feedback model developed through this work could thus be employed to deliver instant, precise, and constructive feedback to students. The custom model includes specialized layers and training strategies designed to enhance feature extraction directly pertinent to the nuances of endodontic imaging.</p>
<p>This study represents a laboratory-based exploration of AI and machine learning development, making it one of the first initiatives in this direction. The use of a large, well-organized, and quality-assured dataset of WL radiographic images has contributed to the development of a robust model. The high satisfaction scores indicate that the AI-assisted feedback tool was well accepted by students and effectively supported their learning in pre-clinical endodontic sessions. The tool&#x0027;s clarity, ease of use, and instant feedback appear to enhance both efficiency and confidence during laboratory tasks. Such positive perceptions support the continued integration of AI-driven systems into pre-clinical teaching to promote active and self-directed learning.</p>
<p>The limitations of this study must be acknowledged. Because all radiographs were obtained from a single institution using a single imaging software package and device, generalizability may be limited across clinics that use different radiographic sensors with varying specifications.</p>
<p>Although the model performed well diagnostically, this study did not test whether AI-supported feedback improves learning outcomes. Future controlled research, such as a randomized comparison with standard instruction, is required to determine educational effectiveness. The tool is currently an alpha version and will continue to be refined.</p>
</sec>
<sec id="s6" sec-type="conclusions"><label>6</label><title>Conclusions</title>
<p>This study presents the development of an AI-powered automated feedback system designed to enhance pre-clinical endodontic training by assisting students in determining radiographic working length (WL). A newly labeled dataset of 3,000 radiographs was used to train 22 convolutional neural network (CNN) models, achieving up to 99&#x0025; accuracy, with the best-performing model selected for implementation. The system provides instant, constructive feedback to support student learning and technical improvement. Future work will focus on expanding the dataset, testing additional AI architectures, and integrating other stages of root canal treatment into a unified intelligent training platform for dental education.</p>
</sec>
</body>
<back>
<sec id="s7" sec-type="data-availability"><title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec id="s8" sec-type="author-contributions"><title>Author contributions</title>
<p>SA: Writing &#x2013; original draft, Resources, Conceptualization, Writing &#x2013; review &#x0026; editing, Data curation, Supervision, Project administration. IA: Writing &#x2013; original draft, Methodology, Conceptualization, Visualization, Validation, Writing &#x2013; review &#x0026; editing, Supervision, Formal analysis, Project administration, Resources. WE-S: Formal analysis, Software, Validation, Visualization, Writing &#x2013; original draft. MA-A: Conceptualization, Methodology, Validation, Visualization, Writing &#x2013; original draft. AA: Data curation, Writing &#x2013; original draft. RA: Data curation, Formal analysis, Writing &#x2013; original draft.</p>
</sec>
<sec id="s10" sec-type="COI-statement"><title>Conflict of interest</title>
<p>The author(s) declared that this work was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s11" sec-type="ai-statement"><title>Generative AI statement</title>
<p>The author(s) declared that generative AI was not used in the creation of this manuscript.</p>
<p>Any alternative text (alt text) provided alongside figures in this article has been generated by Frontiers with the support of artificial intelligence and reasonable efforts have been made to ensure accuracy, including review by the authors wherever possible. If you identify any issues, please contact us.</p>
</sec>
<sec id="s12" sec-type="disclaimer"><title>Publisher&#x0027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list><title>References</title>
<ref id="B1"><label>1.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Carrotte</surname> <given-names>PV</given-names></name></person-group>. <article-title>An introduction to endodontics</article-title>. <source>BDJ Team</source>. (<year>2021</year>) <volume>8</volume>(<issue>4</issue>):<fpage>31</fpage>&#x2013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1038/s41407-021-0582-3</pub-id></mixed-citation></ref>
<ref id="B2"><label>2.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Kakehashi</surname> <given-names>S</given-names></name> <name><surname>Stanley</surname> <given-names>H</given-names></name> <name><surname>Fitzgerald</surname> <given-names>R</given-names></name></person-group>. <article-title>The effects of surgical exposures of dental pulps in germ-free and conventional laboratory rats</article-title>. <source>Oral Surg Oral Med Oral Pathol</source>. (<year>1965</year>) <volume>20</volume>(<issue>3</issue>):<fpage>340</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1016/0030-4220(65)90166-0</pub-id><pub-id pub-id-type="pmid">14342926</pub-id></mixed-citation></ref>
<ref id="B3"><label>3.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Estrela</surname> <given-names>C</given-names></name> <name><surname>Holland</surname> <given-names>R</given-names></name> <name><surname>Estrela</surname> <given-names>C</given-names></name> <name><surname>Alencar</surname> <given-names>AHG</given-names></name> <name><surname>Sousa-Neto</surname> <given-names>MD</given-names></name> <name><surname>P&#x00E9;cora</surname> <given-names>JD</given-names></name></person-group>. <article-title>Characterization of successful root canal treatment</article-title>. <source>Braz Dent J</source>. (<year>2014</year>) <volume>25</volume>:<fpage>3</fpage>&#x2013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1590/0103-6440201302356</pub-id><pub-id pub-id-type="pmid">24789284</pub-id></mixed-citation></ref>
<ref id="B4"><label>4.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bhatt</surname> <given-names>A</given-names></name> <name><surname>Gupta</surname> <given-names>V</given-names></name> <name><surname>Rajkumar</surname> <given-names>B</given-names></name> <name><surname>Arora</surname> <given-names>R</given-names></name></person-group>. <article-title>Working length determination-the soul of root canal therapy a review</article-title>. <source>Int J Dent Helt Sci</source>. (<year>2015</year>) <volume>2</volume>(<issue>1</issue>):<fpage>105</fpage>&#x2013;<lpage>15</lpage>.</mixed-citation></ref>
<ref id="B5"><label>5.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Schilder</surname> <given-names>H</given-names></name></person-group>. <article-title>Cleaning and shaping the root canal</article-title>. <source>Dent Clin North Am</source>. (<year>1974</year>) <volume>18</volume>(<issue>2</issue>):<fpage>269</fpage>&#x2013;<lpage>96</lpage>. <pub-id pub-id-type="doi">10.1016/S0011-8532(22)00677-2</pub-id><pub-id pub-id-type="pmid">4522570</pub-id></mixed-citation></ref>
<ref id="B6"><label>6.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Sj&#x00F6;gren</surname> <given-names>U</given-names></name> <name><surname>H&#x00E4;gglund</surname> <given-names>B</given-names></name> <name><surname>Sundqvist</surname> <given-names>G</given-names></name> <name><surname>Wing</surname> <given-names>K</given-names></name></person-group>. <article-title>Factors affecting the long-term results of endodontic treatment</article-title>. <source>J Endod</source>. (<year>1990</year>) <volume>16</volume>(<issue>10</issue>):<fpage>498</fpage>&#x2013;<lpage>504</lpage>. <pub-id pub-id-type="doi">10.1016/S0099-2399(07)80180-4</pub-id></mixed-citation></ref>
<ref id="B7"><label>7.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ricucci</surname> <given-names>L</given-names></name></person-group>. <article-title>Apical limit of root canal instrumentation and obturation, part 2. A histological study</article-title>. <source>Int Endod J</source>. (<year>1998</year>) <volume>31</volume>(<issue>6</issue>):<fpage>394</fpage>&#x2013;<lpage>409</lpage>. <pub-id pub-id-type="doi">10.1046/j.1365-2591.1998.00183.x</pub-id><pub-id pub-id-type="pmid">15551607</pub-id></mixed-citation></ref>
<ref id="B8"><label>8.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Gordon</surname> <given-names>M</given-names></name> <name><surname>Chandler</surname> <given-names>N</given-names></name></person-group>. <article-title>Electronic apex locators</article-title>. <source>Int Endod J</source>. (<year>2004</year>) <volume>37</volume>(<issue>7</issue>):<fpage>425</fpage>&#x2013;<lpage>37</lpage>. <pub-id pub-id-type="doi">10.1111/j.1365-2591.2004.00835.x</pub-id><pub-id pub-id-type="pmid">15189431</pub-id></mixed-citation></ref>
<ref id="B9"><label>9.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ng</surname> <given-names>YL</given-names></name> <name><surname>Mann</surname> <given-names>V</given-names></name> <name><surname>Gulabivala</surname> <given-names>K</given-names></name></person-group>. <article-title>Tooth survival following non-surgical root canal treatment: a systematic review of the literature</article-title>. <source>Int Endod J</source>. (<year>2010</year>) <volume>43</volume>(<issue>3</issue>):<fpage>171</fpage>&#x2013;<lpage>89</lpage>. <pub-id pub-id-type="doi">10.1111/j.1365-2591.2009.01671.x</pub-id><pub-id pub-id-type="pmid">20158529</pub-id></mixed-citation></ref>
<ref id="B10"><label>10.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Zadik</surname> <given-names>Y</given-names></name> <name><surname>Sandler</surname> <given-names>V</given-names></name> <name><surname>Bechor</surname> <given-names>R</given-names></name> <name><surname>Salehrabi</surname> <given-names>R</given-names></name></person-group>. <article-title>Analysis of factors related to extraction of endodontically treated teeth</article-title>. <source>Oral Surg Oral Med Oral Pathol Oral Radiol Endodontol</source>. (<year>2008</year>) <volume>106</volume>(<issue>5</issue>):<fpage>e31</fpage>&#x2013;<lpage>5</lpage>. <pub-id pub-id-type="doi">10.1016/j.tripleo.2008.06.017</pub-id></mixed-citation></ref>
<ref id="B11"><label>11.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Stabholz</surname> <given-names>A</given-names></name> <name><surname>Rotstein</surname> <given-names>I</given-names></name> <name><surname>Torabinejad</surname> <given-names>M</given-names></name></person-group>. <article-title>Effect of preflaring on tactile detection of the apical constriction</article-title>. <source>J Endod</source>. (<year>1995</year>) <volume>21</volume>(<issue>2</issue>):<fpage>92</fpage>&#x2013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1016/S0099-2399(06)81103-9</pub-id><pub-id pub-id-type="pmid">7714445</pub-id></mixed-citation></ref>
<ref id="B12"><label>12.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Chandler</surname> <given-names>N</given-names></name> <name><surname>Bloxham</surname> <given-names>G</given-names></name></person-group>. <article-title>Effect of gloves on tactile discrimination using an endodontic model</article-title>. <source>Int Endod J</source>. (<year>1990</year>) <volume>23</volume>(<issue>2</issue>):<fpage>97</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1111/j.1365-2591.1990.tb00846.x</pub-id><pub-id pub-id-type="pmid">2391186</pub-id></mixed-citation></ref>
<ref id="B13"><label>13.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Jenkins</surname> <given-names>JA</given-names></name> <name><surname>Walker</surname><given-names>WA</given-names><suffix>III</suffix></name> <name><surname>Schindler</surname> <given-names>WG</given-names></name> <name><surname>Flores</surname> <given-names>CM</given-names></name></person-group>. <article-title>An <italic>in vitro</italic> evaluation of the accuracy of the root ZX in the presence of various irrigants</article-title>. <source>J Endod</source>. (<year>2001</year>) <volume>27</volume>(<issue>3</issue>):<fpage>209</fpage>&#x2013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1097/00004770-200103000-00018</pub-id><pub-id pub-id-type="pmid">11487154</pub-id></mixed-citation></ref>
<ref id="B14"><label>14.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Trope</surname> <given-names>M</given-names></name> <name><surname>Rabie</surname> <given-names>G</given-names></name> <name><surname>Tronstad</surname> <given-names>L</given-names></name></person-group>. <article-title>Accuracy of an electronic apex locator under controlled clinical conditions</article-title>. <source>Dent Traumatol</source>. (<year>1985</year>) <volume>1</volume>(<issue>4</issue>):<fpage>142</fpage>&#x2013;<lpage>5</lpage>. <pub-id pub-id-type="doi">10.1111/j.1600-9657.1985.tb00579.x</pub-id></mixed-citation></ref>
<ref id="B15"><label>15.</label><mixed-citation publication-type="journal"><collab>Endodontology ESo</collab>. <article-title>Quality guidelines for endodontic treatment: consensus report of the European society of endodontology</article-title>. <source>Int Endod J</source>. (<year>2006</year>) <volume>39</volume>(<issue>12</issue>):<fpage>921</fpage>&#x2013;<lpage>30</lpage>. <pub-id pub-id-type="doi">10.1111/j.1365-2591.2006.01180.x</pub-id><pub-id pub-id-type="pmid">17180780</pub-id></mixed-citation></ref>
<ref id="B16"><label>16.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Orafi</surname> <given-names>I</given-names></name> <name><surname>Rushton</surname> <given-names>V</given-names></name></person-group>. <article-title>The use of radiography and the apex locator in endodontic treatment within the UK: a comparison between endodontic specialists and general dental practitioners</article-title>. <source>Int Endod J</source>. (<year>2013</year>) <volume>46</volume>(<issue>4</issue>):<fpage>355</fpage>&#x2013;<lpage>64</lpage>. <pub-id pub-id-type="doi">10.1111/j.1365-2591.2012.02127.x</pub-id><pub-id pub-id-type="pmid">23094654</pub-id></mixed-citation></ref>
<ref id="B17"><label>17.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ahmed</surname> <given-names>ZH</given-names></name> <name><surname>Almuharib</surname> <given-names>AM</given-names></name> <name><surname>Abdulkarim</surname> <given-names>AA</given-names></name> <name><surname>Alhassoon</surname> <given-names>AH</given-names></name> <name><surname>Alanazi</surname> <given-names>AF</given-names></name> <name><surname>Alhaqbani</surname> <given-names>MA</given-names></name><etal/></person-group> <article-title>Artificial intelligence and its application in endodontics: a review</article-title>. <source>J Contemp Dent Pract</source>. (<year>2024</year>) <volume>24</volume>(<issue>11</issue>):<fpage>912</fpage>&#x2013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.5005/jp-journals-10024-3593</pub-id></mixed-citation></ref>
<ref id="B18"><label>18.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bonny</surname> <given-names>T</given-names></name> <name><surname>Al Nassan</surname> <given-names>W</given-names></name> <name><surname>Obaideen</surname> <given-names>K</given-names></name> <name><surname>Al Mallahi</surname> <given-names>MN</given-names></name> <name><surname>Mohammad</surname> <given-names>Y</given-names></name> <name><surname>El-Damanhoury</surname> <given-names>HM</given-names></name></person-group>. <article-title>Contemporary role and applications of artificial intelligence in dentistry</article-title>. <source>F1000Res</source>. (<year>2023</year>) <volume>12</volume>:<fpage>1</fpage>&#x2013;<lpage>27</lpage>. <pub-id pub-id-type="doi">10.12688/f1000research.140204.1</pub-id></mixed-citation></ref>
<ref id="B19"><label>19.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Endres</surname> <given-names>MG</given-names></name> <name><surname>Hillen</surname> <given-names>F</given-names></name> <name><surname>Salloumis</surname> <given-names>M</given-names></name> <name><surname>Sedaghat</surname> <given-names>AR</given-names></name> <name><surname>Niehues</surname> <given-names>SM</given-names></name> <name><surname>Quatela</surname> <given-names>O</given-names></name><etal/></person-group> <article-title>Development of a deep learning algorithm for periapical disease detection in dental radiographs</article-title>. <source>Diagnostics</source>. (<year>2020</year>) <volume>10</volume>(<issue>6</issue>):<fpage>430</fpage>. <pub-id pub-id-type="doi">10.3390/diagnostics10060430</pub-id><pub-id pub-id-type="pmid">32599942</pub-id></mixed-citation></ref>
<ref id="B20"><label>20.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Marwaha</surname> <given-names>J</given-names></name></person-group>. <article-title>Artificial intelligence in conservative dentistry and endodontics: a game-changer</article-title>. <source>J Conserv Dentistry Endodontics</source>. (<year>2023</year>) <volume>26</volume>(<issue>5</issue>):<fpage>514</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.4103/JCDE.JCDE_7_23</pub-id></mixed-citation></ref>
<ref id="B21"><label>21.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Prados-Privado</surname> <given-names>M</given-names></name> <name><surname>Garc&#x00ED;a Villal&#x00F3;n</surname> <given-names>J</given-names></name> <name><surname>Mart&#x00ED;nez-Mart&#x00ED;nez</surname> <given-names>CH</given-names></name> <name><surname>Ivorra</surname> <given-names>C</given-names></name> <name><surname>Prados-Frutos</surname> <given-names>JC</given-names></name></person-group>. <article-title>Dental caries diagnosis and detection using neural networks: a systematic review</article-title>. <source>J Clin Med</source>. (<year>2020</year>) <volume>9</volume>(<issue>11</issue>):<fpage>3579</fpage>. <pub-id pub-id-type="doi">10.3390/jcm9113579</pub-id><pub-id pub-id-type="pmid">33172056</pub-id></mixed-citation></ref>
<ref id="B22"><label>22.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>J</given-names></name> <name><surname>Seo</surname> <given-names>H</given-names></name> <name><surname>Choi</surname> <given-names>YJ</given-names></name> <name><surname>Lee</surname> <given-names>C</given-names></name> <name><surname>Kim</surname> <given-names>S</given-names></name> <name><surname>Lee</surname> <given-names>YS</given-names></name><etal/></person-group> <article-title>An endodontic forecasting model based on the analysis of preoperative dental radiographs: a pilot study on an endodontic predictive deep neural network</article-title>. <source>J Endod</source>. (<year>2023</year>) <volume>49</volume>(<issue>6</issue>):<fpage>710</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1016/j.joen.2023.03.015</pub-id><pub-id pub-id-type="pmid">37019378</pub-id></mixed-citation></ref>
<ref id="B23"><label>23.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Orhan</surname> <given-names>K</given-names></name> <name><surname>Bayrakdar</surname> <given-names>I</given-names></name> <name><surname>Ezhov</surname> <given-names>M</given-names></name> <name><surname>Kravtsov</surname> <given-names>A</given-names></name> <name><surname>&#x00D6;zy&#x00FC;rek</surname> <given-names>T</given-names></name></person-group>. <article-title>Evaluation of artificial intelligence for detecting periapical pathosis on cone-beam computed tomography scans</article-title>. <source>Int Endod J</source>. (<year>2020</year>) <volume>53</volume>(<issue>5</issue>):<fpage>680</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1111/iej.13265</pub-id><pub-id pub-id-type="pmid">31922612</pub-id></mixed-citation></ref>
<ref id="B24"><label>24.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Johari</surname> <given-names>M</given-names></name> <name><surname>Esmaeili</surname> <given-names>F</given-names></name> <name><surname>Andalib</surname> <given-names>A</given-names></name> <name><surname>Garjani</surname> <given-names>S</given-names></name> <name><surname>Saberkari</surname> <given-names>H</given-names></name></person-group>. <article-title>Detection of vertical root fractures in intact and endodontically treated premolar teeth by designing a probabilistic neural network: an <italic>ex vivo</italic> study</article-title>. <source>Dentomaxillofacial Radiology</source>. (<year>2017</year>) <volume>46</volume>(<issue>2</issue>):<fpage>20160107</fpage>. <pub-id pub-id-type="doi">10.1259/dmfr.20160107</pub-id><pub-id pub-id-type="pmid">27786566</pub-id></mixed-citation></ref>
<ref id="B25"><label>25.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nardi</surname> <given-names>C</given-names></name> <name><surname>Calistri</surname> <given-names>L</given-names></name> <name><surname>Pradella</surname> <given-names>S</given-names></name> <name><surname>Desideri</surname> <given-names>I</given-names></name> <name><surname>Lorini</surname> <given-names>C</given-names></name> <name><surname>Colagrande</surname> <given-names>S</given-names></name></person-group>. <article-title>Accuracy of orthopantomography for apical periodontitis without endodontic treatment</article-title>. <source>J Endod</source>. (<year>2017</year>) <volume>43</volume>(<issue>10</issue>):<fpage>1640</fpage>&#x2013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1016/j.joen.2017.06.020</pub-id><pub-id pub-id-type="pmid">28807372</pub-id></mixed-citation></ref>
<ref id="B26"><label>26.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hasan</surname> <given-names>HA</given-names></name> <name><surname>Saad</surname> <given-names>FH</given-names></name> <name><surname>Ahmed</surname> <given-names>S</given-names></name> <name><surname>Mohammed</surname> <given-names>N</given-names></name> <name><surname>Farook</surname> <given-names>TH</given-names></name> <name><surname>Dudley</surname> <given-names>J</given-names></name></person-group>. <article-title>Experimental validation of computer-vision methods for the successful detection of endodontic treatment obturation and progression from noisy radiographs</article-title>. <source>Oral Radiol</source>. (<year>2023</year>) <volume>39</volume>(<issue>4</issue>):<fpage>683</fpage>&#x2013;<lpage>98</lpage>. <pub-id pub-id-type="doi">10.1007/s11282-023-00685-8</pub-id><pub-id pub-id-type="pmid">37097541</pub-id></mixed-citation></ref>
<ref id="B27"><label>27.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Khanagar</surname> <given-names>SB</given-names></name> <name><surname>Al-Ehaideb</surname> <given-names>A</given-names></name> <name><surname>Vishwanathaiah</surname> <given-names>S</given-names></name> <name><surname>Maganur</surname> <given-names>PC</given-names></name> <name><surname>Patil</surname> <given-names>S</given-names></name> <name><surname>Naik</surname> <given-names>S</given-names></name><etal/></person-group> <article-title>Scope and performance of artificial intelligence technology in orthodontic diagnosis, treatment planning, and clinical decision-making-a systematic review</article-title>. <source>J Dent Sci</source>. (<year>2021</year>) <volume>16</volume>(<issue>1</issue>):<fpage>482</fpage>&#x2013;<lpage>92</lpage>. <pub-id pub-id-type="doi">10.1016/j.jds.2020.05.022</pub-id><pub-id pub-id-type="pmid">33384838</pub-id></mixed-citation></ref>
<ref id="B28"><label>28.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Karobari</surname> <given-names>MI</given-names></name> <name><surname>Adil</surname> <given-names>AH</given-names></name> <name><surname>Basheer</surname> <given-names>SN</given-names></name> <name><surname>Murugesan</surname> <given-names>S</given-names></name> <name><surname>Savadamoorthi</surname> <given-names>KS</given-names></name> <name><surname>Mustafa</surname> <given-names>M</given-names></name><etal/></person-group> <article-title>Evaluation of the diagnostic and prognostic accuracy of artificial intelligence in endodontic dentistry: a comprehensive review of literature</article-title>. <source>Comput Math Methods Med</source>. (<year>2023</year>) <volume>2023</volume>:<fpage>1</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1155/2023/7049360</pub-id></mixed-citation></ref>
<ref id="B29"><label>29.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Saghiri</surname> <given-names>MA</given-names></name> <name><surname>Asgar</surname> <given-names>K</given-names></name> <name><surname>Boukani</surname> <given-names>KK</given-names></name> <name><surname>Lotfi</surname> <given-names>M</given-names></name> <name><surname>Aghili</surname> <given-names>H</given-names></name> <name><surname>Delvarani</surname> <given-names>A</given-names></name><etal/></person-group> <article-title>A new approach for locating the minor apical foramen using an artificial neural network</article-title>. <source>Int Endod J</source>. (<year>2012</year>) <volume>45</volume>(<issue>3</issue>):<fpage>257</fpage>&#x2013;<lpage>65</lpage>. <pub-id pub-id-type="doi">10.1111/j.1365-2591.2011.01970.x</pub-id><pub-id pub-id-type="pmid">22007705</pub-id></mixed-citation></ref>
<ref id="B30"><label>30.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Qiao</surname> <given-names>X</given-names></name> <name><surname>Zhang</surname> <given-names>Z</given-names></name> <name><surname>Chen</surname> <given-names>X</given-names></name></person-group>. <article-title>Multifrequency impedance method based on neural network for root canal length measurement</article-title>. <source>Appl Sci</source>. (<year>2020</year>) <volume>10</volume>(<issue>21</issue>):<fpage>7430</fpage>. <pub-id pub-id-type="doi">10.3390/app10217430</pub-id></mixed-citation></ref>
<ref id="B31"><label>31.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Seijo</surname> <given-names>MO</given-names></name> <name><surname>Ferreira</surname> <given-names>EF</given-names></name> <name><surname>Ribeiro Sobrinho</surname> <given-names>AP</given-names></name> <name><surname>Paiva</surname> <given-names>SM</given-names></name> <name><surname>Martins</surname> <given-names>RC</given-names></name></person-group>. <article-title>Learning experience in endodontics: brazilian students&#x2019; perceptions</article-title>. <source>J Dent Educ</source>. (<year>2013</year>) <volume>77</volume>(<issue>5</issue>):<fpage>648</fpage>&#x2013;<lpage>55</lpage>. <pub-id pub-id-type="doi">10.1002/j.0022-0337.2013.77.5.tb05515.x</pub-id><pub-id pub-id-type="pmid">23658412</pub-id></mixed-citation></ref>
<ref id="B32"><label>32.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Baaij</surname> <given-names>A</given-names></name> <name><surname>Kruse</surname> <given-names>C</given-names></name> <name><surname>Whitworth</surname> <given-names>J</given-names></name> <name><surname>Jarad</surname> <given-names>F</given-names></name></person-group>. <article-title>European Society of endodontology undergraduate curriculum guidelines for endodontology</article-title>. <source>Int Endod J</source>. (<year>2024</year>) <volume>57</volume>(<issue>8</issue>):<fpage>982</fpage>&#x2013;<lpage>95</lpage>. <pub-id pub-id-type="doi">10.1111/iej.14064</pub-id><pub-id pub-id-type="pmid">38551606</pub-id></mixed-citation></ref>
<ref id="B33"><label>33.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Davey</surname> <given-names>J</given-names></name> <name><surname>Bryant</surname> <given-names>S</given-names></name> <name><surname>Dummer</surname> <given-names>P</given-names></name></person-group>. <article-title>The confidence of undergraduate dental students when performing root canal treatment and their perception of the quality of endodontic education</article-title>. <source>Eur J Dent Educ</source>. (<year>2015</year>) <volume>19</volume>(<issue>4</issue>):<fpage>229</fpage>&#x2013;<lpage>34</lpage>. <pub-id pub-id-type="doi">10.1111/eje.12130</pub-id><pub-id pub-id-type="pmid">25490882</pub-id></mixed-citation></ref>
<ref id="B34"><label>34.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Hattar</surname> <given-names>S</given-names></name> <name><surname>AlHadidi</surname> <given-names>A</given-names></name> <name><surname>Altarawneh</surname> <given-names>S</given-names></name> <name><surname>Hamdan</surname> <given-names>AA</given-names></name> <name><surname>Shaini</surname> <given-names>FJ</given-names></name> <name><surname>Wahab</surname> <given-names>FK</given-names></name></person-group>. <article-title>Dental students&#x2019; experience and perceived confidence level in different restorative procedures</article-title>. <source>Eur J Dent Educ</source>. (<year>2021</year>) <volume>25</volume>(<issue>1</issue>):<fpage>207</fpage>&#x2013;<lpage>14</lpage>. <pub-id pub-id-type="doi">10.1111/eje.12592</pub-id><pub-id pub-id-type="pmid">33245624</pub-id></mixed-citation></ref>
<ref id="B35"><label>35.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Baaij</surname> <given-names>A</given-names></name> <name><surname>&#x00D6;zok</surname> <given-names>A</given-names></name></person-group>. <article-title>Method of teaching undergraduate students to perform root canal treatment: it&#x2019;s influence on the quality of root fillings</article-title>. <source>Eur J Dent Educ</source>. (<year>2018</year>) <volume>22</volume>(<issue>2</issue>):<fpage>e221</fpage>&#x2013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1111/eje.12275</pub-id><pub-id pub-id-type="pmid">28636151</pub-id></mixed-citation></ref>
<ref id="B36"><label>36.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Moidu</surname> <given-names>NP</given-names></name> <name><surname>Sharma</surname> <given-names>S</given-names></name> <name><surname>Chawla</surname> <given-names>A</given-names></name> <name><surname>Kumar</surname> <given-names>V</given-names></name> <name><surname>Logani</surname> <given-names>A</given-names></name></person-group>. <article-title>Deep learning for categorization of endodontic lesion based on radiographic periapical index scoring system</article-title>. <source>Clin Oral Investig</source>. (<year>2022</year>) <volume>26</volume>(<issue>1</issue>):<fpage>651</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1007/s00784-021-04043-y</pub-id><pub-id pub-id-type="pmid">34213664</pub-id></mixed-citation></ref>
<ref id="B37"><label>37.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Y&#x00FC;zba&#x015F;&#x0131;o&#x011F;lu</surname> <given-names>E</given-names></name></person-group>. <article-title>Attitudes and perceptions of dental students towards artificial intelligence</article-title>. <source>J Dent Educ</source>. (<year>2021</year>) <volume>85</volume>(<issue>1</issue>):<fpage>60</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1002/jdd.12385</pub-id></mixed-citation></ref>
<ref id="B38"><label>38.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Buyuk</surname> <given-names>C</given-names></name></person-group>. <article-title>Should artificial intelligence integrate with dental education? An assessment through the dentomaxillofacial radiology perspective</article-title>. <source>J Res Dentistry</source>. (<year>2021</year>) <volume>9</volume>(<issue>1</issue>):<fpage>6</fpage>&#x2013;<lpage>13</lpage>. <pub-id pub-id-type="doi">10.19177/jrd.v9e120216-13</pub-id></mixed-citation></ref>
<ref id="B39"><label>39.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Nassar</surname> <given-names>HM</given-names></name> <name><surname>Tekian</surname> <given-names>A</given-names></name></person-group>. <article-title>Computer simulation and virtual reality in undergraduate operative and restorative dental education: a critical review</article-title>. <source>J Dent Educ</source>. (<year>2020</year>) <volume>84</volume>(<issue>7</issue>):<fpage>812</fpage>&#x2013;<lpage>29</lpage>. <pub-id pub-id-type="doi">10.1002/jdd.12138</pub-id><pub-id pub-id-type="pmid">32147841</pub-id></mixed-citation></ref>
<ref id="B40"><label>40.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Prinz</surname> <given-names>M</given-names></name> <name><surname>B&#x00FC;rklein</surname> <given-names>S</given-names></name> <name><surname>Sch&#x00E4;fer</surname> <given-names>E</given-names></name> <name><surname>Donnermeyer</surname> <given-names>D</given-names></name></person-group>. <article-title>An AI-based e-learning tool to improve endodontic diagnostics in undergraduate students</article-title>. <source>J Dent Educ</source>. (<year>2024</year>) <volume>88</volume>:<fpage>1935</fpage>&#x2013;<lpage>37</lpage>. <pub-id pub-id-type="doi">10.1002/jdd.13479</pub-id><pub-id pub-id-type="pmid">38343338</pub-id></mixed-citation></ref>
<ref id="B41"><label>41.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>El-Shafai</surname> <given-names>W</given-names></name> <name><surname>Almomani</surname> <given-names>I</given-names></name> <name><surname>AlKhayer</surname> <given-names>A</given-names></name></person-group>. <article-title>Visualized malware multi-classification framework using fine-tuned CNN-based transfer learning models</article-title>. <source>Appl Sci</source>. (<year>2021</year>) <volume>11</volume>(<issue>14</issue>):<fpage>6446</fpage>. <pub-id pub-id-type="doi">10.3390/app11146446</pub-id></mixed-citation></ref>
<ref id="B42"><label>42.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Almomani</surname> <given-names>I</given-names></name> <name><surname>Alkhayer</surname> <given-names>A</given-names></name> <name><surname>El-Shafai</surname> <given-names>W</given-names></name></person-group>. <article-title>An automated vision-based deep learning model for efficient detection of android malware attacks</article-title>. <source>IEEE Access</source>. (<year>2022</year>) <volume>10</volume>:<fpage>2700</fpage>&#x2013;<lpage>20</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2022.3140341</pub-id></mixed-citation></ref>
<ref id="B43"><label>43.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Abadi</surname> <given-names>M</given-names></name> <name><surname>Barham</surname> <given-names>P</given-names></name> <name><surname>Chen</surname> <given-names>J</given-names></name> <name><surname>Chen</surname> <given-names>Z</given-names></name> <name><surname>Davis</surname> <given-names>A</given-names></name> <name><surname>Dean</surname> <given-names>J</given-names></name><etal/></person-group> <source>&#x007B;TensorFlow&#x007D;: A System for &#x007B;Large-Scale&#x007D; Machine Learning</source>. <publisher-loc>Berkeley, CA</publisher-loc>: <publisher-name>USENIX Association</publisher-name> (<year>2016</year>). p. <fpage>265</fpage>&#x2013;<lpage>83</lpage>.</mixed-citation></ref>
<ref id="B44"><label>44.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Brownlee</surname> <given-names>J</given-names></name></person-group>. <source>Deep learning with python: develop deep learning models on theano and TensorFlow using keras</source>. <publisher-loc>Auckland</publisher-loc>: <publisher-name>Machine Learn Mastery</publisher-name> (<year>2016</year>).</mixed-citation></ref>
<ref id="B45"><label>45.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>G&#x00E9;ron</surname> <given-names>A</given-names></name></person-group>. <source>Hands-on machine learning with scikit-learn. Keras, and TensorFlow</source>. <publisher-name>O&#x0027;Reilly Media, Inc</publisher-name> (<year>2022</year>).</mixed-citation></ref>
<ref id="B46"><label>46.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Hodnett</surname> <given-names>M</given-names></name> <name><surname>Wiley</surname> <given-names>JF</given-names></name></person-group>. <source>R Deep Learning Essentials: A Step-by-step guide to Building Deep Learning Models Using TensorFlow, Keras, and MXNet</source>. <publisher-loc>Birmingham</publisher-loc>: <publisher-name>Packt Publishing Ltd</publisher-name> (<year>2018</year>).</mixed-citation></ref>
<ref id="B47"><label>47.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Joseph</surname> <given-names>FJJ</given-names></name> <name><surname>Nonsiri</surname> <given-names>S</given-names></name> <name><surname>Monsakul</surname> <given-names>A</given-names></name></person-group>. <source>Keras and TensorFlow: A Hands-on Experience. Advanced Deep Learning for Engineers and Scientists: A Practical Approach</source>. <publisher-loc>Cham</publisher-loc>: <publisher-name>Springer</publisher-name> (<year>2021</year>). p. <fpage>85</fpage>&#x2013;<lpage>111</lpage>.</mixed-citation></ref>
<ref id="B48"><label>48.</label><mixed-citation publication-type="book"><person-group person-group-type="author"><name><surname>Vasilev</surname> <given-names>I</given-names></name> <name><surname>Slater</surname> <given-names>D</given-names></name> <name><surname>Spacagna</surname> <given-names>G</given-names></name> <name><surname>Roelants</surname> <given-names>P</given-names></name> <name><surname>Zocca</surname> <given-names>V</given-names></name></person-group>. <source>Python Deep Learning: Exploring Deep Learning Techniques and Neural Network Architectures with Pytorch, Keras, and TensorFlow</source>. <publisher-loc>Birmingham</publisher-loc>: <publisher-name>Packt Publishing Ltd</publisher-name> (<year>2019</year>).</mixed-citation></ref>
<ref id="B49"><label>49.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Bera</surname> <given-names>S</given-names></name> <name><surname>Shrivastava</surname> <given-names>VK</given-names></name></person-group>. <article-title>Analysis of various optimizers on deep convolutional neural network model in the application of hyperspectral remote sensing image classification</article-title>. <source>Int J Remote Sens</source>. (<year>2020</year>) <volume>41</volume>(<issue>7</issue>):<fpage>2664</fpage>&#x2013;<lpage>83</lpage>. <pub-id pub-id-type="doi">10.1080/01431161.2019.1694725</pub-id></mixed-citation></ref>
<ref id="B50"><label>50.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Ramezanzade</surname> <given-names>S</given-names></name> <name><surname>Laurentiu</surname> <given-names>T</given-names></name> <name><surname>Bakhshandah</surname> <given-names>A</given-names></name> <name><surname>Ibragimov</surname> <given-names>B</given-names></name> <name><surname>Kvist</surname> <given-names>T</given-names></name> <name><surname>Bj&#x00F8;rndal</surname> <given-names>L</given-names></name></person-group>. <article-title>The efficiency of artificial intelligence methods for finding radiographic features in different endodontic treatments-a systematic review</article-title>. <source>Acta Odontol Scand</source>. (<year>2023</year>) <volume>81</volume>(<issue>6</issue>):<fpage>422</fpage>&#x2013;<lpage>35</lpage>. <pub-id pub-id-type="doi">10.1080/00016357.2022.2158929</pub-id><pub-id pub-id-type="pmid">36548872</pub-id></mixed-citation></ref>
<ref id="B51"><label>51.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>Y</given-names></name> <name><surname>Zeng</surname> <given-names>G</given-names></name> <name><surname>Zhang</surname> <given-names>Y</given-names></name> <name><surname>Wang</surname> <given-names>J</given-names></name> <name><surname>Jin</surname> <given-names>Q</given-names></name> <name><surname>Sun</surname> <given-names>L</given-names></name><etal/></person-group> <article-title>AGMB-transformer: anatomy-guided multi-branch transformer network for automated evaluation of root canal therapy</article-title>. <source>IEEE J Biomed Health Inform</source>. (<year>2021</year>) <volume>26</volume>(<issue>4</issue>):<fpage>1684</fpage>&#x2013;<lpage>95</lpage>. <pub-id pub-id-type="doi">10.1109/JBHI.2021.3129245</pub-id></mixed-citation></ref>
<ref id="B52"><label>52.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Mulryan-Kyne</surname> <given-names>C</given-names></name></person-group>. <article-title>Teaching large classes at college and university level: challenges and opportunities</article-title>. <source>Teach Higher Educ</source>. (<year>2010</year>) <volume>15</volume>(<issue>2</issue>):<fpage>175</fpage>&#x2013;<lpage>85</lpage>. <pub-id pub-id-type="doi">10.1080/13562511003620001</pub-id></mixed-citation></ref>
<ref id="B53"><label>53.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Qualtrough</surname> <given-names>A</given-names></name></person-group>. <article-title>Undergraduate endodontic education: what are the challenges?</article-title> <source>Br Dent J</source>. (<year>2014</year>) <volume>216</volume>(<issue>6</issue>):<fpage>361</fpage>&#x2013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1038/sj.bdj.2014.227</pub-id><pub-id pub-id-type="pmid">24651348</pub-id></mixed-citation></ref>
<ref id="B54"><label>54.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Saghiri</surname> <given-names>MA</given-names></name> <name><surname>Garcia-Godoy</surname> <given-names>F</given-names></name> <name><surname>Gutmann</surname> <given-names>JL</given-names></name> <name><surname>Lotfi</surname> <given-names>M</given-names></name> <name><surname>Asgar</surname> <given-names>K</given-names></name></person-group>. <article-title>The reliability of artificial neural network in locating minor apical foramen: a cadaver study</article-title>. <source>J Endod</source>. (<year>2012</year>) <volume>38</volume>(<issue>8</issue>):<fpage>1130</fpage>&#x2013;<lpage>4</lpage>. <pub-id pub-id-type="doi">10.1016/j.joen.2012.05.004</pub-id><pub-id pub-id-type="pmid">22794221</pub-id></mixed-citation></ref>
<ref id="B55"><label>55.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Almutairi</surname> <given-names>M</given-names></name> <name><surname>Alattas</surname> <given-names>MH</given-names></name> <name><surname>Alamoudi</surname> <given-names>A</given-names></name> <name><surname>Bahammam</surname> <given-names>SA</given-names></name> <name><surname>Zidane</surname> <given-names>B</given-names></name> <name><surname>Almutairi</surname> <given-names>N</given-names></name><etal/></person-group> <article-title>Challenges assessment in endodontics among undergraduate students</article-title>. <source>Cureus</source>. (<year>2023</year>) <volume>15</volume>(<issue>8</issue>):<fpage>e43215</fpage>. <pub-id pub-id-type="doi">10.7759/cureus.43215</pub-id><pub-id pub-id-type="pmid">37692626</pub-id></mixed-citation></ref>
<ref id="B56"><label>56.</label><mixed-citation publication-type="journal"><person-group person-group-type="author"><name><surname>Barakat</surname> <given-names>RM</given-names></name> <name><surname>Matoug-Elwerfelli</surname> <given-names>M</given-names></name> <name><surname>Almohareb</surname> <given-names>RA</given-names></name> <name><surname>Balto</surname> <given-names>HA</given-names></name></person-group>. <article-title>Influence of preclinical training on root canal treatment technical quality and confidence level of undergraduate dental students</article-title>. <source>Int J Dent</source>. (<year>2021</year>) <volume>2021</volume>:<fpage>1</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1155/2021/9920280</pub-id></mixed-citation></ref></ref-list>
<fn-group>
<fn id="n1" fn-type="custom" custom-type="edited-by"><p>Edited by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2697697/overview">Viresh Chopra</ext-link>, Oman Dental College, Oman</p></fn>
<fn id="n2" fn-type="custom" custom-type="reviewed-by"><p>Reviewed by: <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1971465/overview">Surendra Maharjan</ext-link>, NewYork-Presbyterian, United States</p>
<p><ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/3265257/overview">Hadeel Mazin Akram</ext-link>, University of Baghdad, Iraq</p></fn>
</fn-group>
</back>
</article>