<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Endocrinol.</journal-id>
<journal-title>Frontiers in Endocrinology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Endocrinol.</abbrev-journal-title>
<issn pub-type="epub">1664-2392</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fendo.2024.1372397</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Endocrinology</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Improving the diagnostic performance of inexperienced readers for thyroid nodules through digital self-learning and artificial intelligence assistance</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Lee</surname>
<given-names>Si Eun</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2739437"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Kim</surname>
<given-names>Hye Jung</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Jung</surname>
<given-names>Hae Kyoung</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Jung</surname>
<given-names>Jin Hyang</given-names>
</name>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Jeon</surname>
<given-names>Jae-Han</given-names>
</name>
<xref ref-type="aff" rid="aff5">
<sup>5</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Lee</surname>
<given-names>Jin Hee</given-names>
</name>
<xref ref-type="aff" rid="aff6">
<sup>6</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Hong</surname>
<given-names>Hanpyo</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Lee</surname>
<given-names>Eun Jung</given-names>
</name>
<xref ref-type="aff" rid="aff7">
<sup>7</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Kim</surname>
<given-names>Daham</given-names>
</name>
<xref ref-type="aff" rid="aff8">
<sup>8</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2132111"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Kwak</surname>
<given-names>Jin Young</given-names>
</name>
<xref ref-type="aff" rid="aff9">
<sup>9</sup>
</xref>
<xref ref-type="author-notes" rid="fn001">
<sup>*</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/534562"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/project-administration/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>Department of Radiology, Yongin Severance Hospital, College of Medicine, Yonsei University</institution>, <addr-line>Yongin-si</addr-line>, <country>Republic of Korea</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Department of Radiology, Kyungpook National University Chilgok Hospital</institution>, <addr-line>Daegu</addr-line>, <country>Republic of Korea</country>
</aff>
<aff id="aff3">
<sup>3</sup>
<institution>Department of Radiology, CHA University Bundang Medical Center</institution>, <addr-line>Seongnam-si</addr-line>, <country>Republic of Korea</country>
</aff>
<aff id="aff4">
<sup>4</sup>
<institution>Department of Surgery, Kyungpook National University Chilgok Hospital</institution>, <addr-line>Daegu</addr-line>, <country>Republic of Korea</country>
</aff>
<aff id="aff5">
<sup>5</sup>
<institution>Department of Endocrinology, Kyungpook National University Chilgok Hospital</institution>, <addr-line>Daegu</addr-line>, <country>Republic of Korea</country>
</aff>
<aff id="aff6">
<sup>6</sup>
<institution>Department of Radiology, Keimyung University Dongsan Hospital</institution>, <addr-line>Daegu</addr-line>, <country>Republic of Korea</country>
</aff>
<aff id="aff7">
<sup>7</sup>
<institution>Department of Computational Science and Engineering, Yonsei University</institution>, <addr-line>Seoul</addr-line>, <country>Republic of Korea</country>
</aff>
<aff id="aff8">
<sup>8</sup>
<institution>Department of Endocrinology, College of Medicine, Yonsei University</institution>, <addr-line>Seoul</addr-line>, <country>Republic of Korea</country>
</aff>
<aff id="aff9">
<sup>9</sup>
<institution>Department of Radiology, College of Medicine, Yonsei University</institution>, <addr-line>Seoul</addr-line>, <country>Republic of Korea</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>Edited by: Jacopo Manso, University of Padua, Italy</p>
</fn>
<fn fn-type="edited-by">
<p>Reviewed by: Hersh Sagreiya, University of Pennsylvania, United States</p>
<p>Eun Cho, Gyeongsang National University Changwon Hospital, Republic of Korea</p>
<p>Lorenzo Faggioni, University of Pisa, Italy</p>
</fn>
<fn fn-type="corresp" id="fn001">
<p>*Correspondence: Jin Young Kwak, <email xlink:href="mailto:docjin@yuhs.ac">docjin@yuhs.ac</email>; Hye Jung Kim, <email xlink:href="mailto:ant637@knuh.kr">ant637@knuh.kr</email>
</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>02</day>
<month>07</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>15</volume>
<elocation-id>1372397</elocation-id>
<history>
<date date-type="received">
<day>18</day>
<month>01</month>
<year>2024</year>
</date>
<date date-type="accepted">
<day>12</day>
<month>06</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2024 Lee, Kim, Jung, Jung, Jeon, Lee, Hong, Lee, Kim and Kwak</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Lee, Kim, Jung, Jung, Jeon, Lee, Hong, Lee, Kim and Kwak</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<sec>
<title>Background</title>
<p>Data-driven digital learning could improve the diagnostic performance of novice students for thyroid nodules.</p>
</sec>
<sec>
<title>Objective</title>
<p>To evaluate the efficacy of digital self-learning and artificial intelligence-based computer-assisted diagnosis (AI-CAD) for inexperienced readers to diagnose thyroid nodules.</p>
</sec>
<sec>
<title>Methods</title>
<p>Between February and August 2023, a total of 26 readers (less than 1 year of experience in thyroid US from various departments) from 6 hospitals participated in this study. Readers completed an online learning session comprising 3,000 thyroid nodules annotated as benign or malignant independently. They were asked to assess a test set consisting of 120 thyroid nodules with known surgical pathology before and after a learning session. Then, they referred to AI-CAD and made their final decisions on the thyroid nodules. Diagnostic performances before and after self-training and with AI-CAD assistance were evaluated and compared between radiology residents and readers from different specialties.</p>
</sec>
<sec>
<title>Results</title>
<p>AUC (area under the receiver operating characteristic curve) improved after the self-learning session, and it improved further after radiologists referred to AI-CAD (0.679 vs 0.713 vs 0.758, p&lt;0.05). Although the 18 radiology residents showed improved AUC (0.7 to 0.743, p=0.016) and accuracy (69.9% to 74.2%, p=0.013) after self-learning, the readers from other departments did not. With AI-CAD assistance, sensitivity (radiology 70.3% to 74.9%, others 67.9% to 82.3%, all p&lt;0.05) and accuracy (radiology 74.2% to 77.1%, others 64.4% to 72.8%, all p &lt;0.05) improved in all readers.</p>
</sec>
<sec>
<title>Conclusion</title>
<p>While AI-CAD assistance helps improve the diagnostic performance of all inexperienced readers for thyroid nodules, self-learning was only effective for radiology residents with more background knowledge of ultrasonography.</p>
</sec>
<sec>
<title>Clinical Impact</title>
<p>Online self-learning, along with AI-CAD assistance, can effectively enhance the diagnostic performance of radiology residents in thyroid cancer.</p>
</sec>
</abstract>
<kwd-group>
<kwd>thyroid cancer</kwd>
<kwd>artificial intelligence</kwd>
<kwd>ultrasound</kwd>
<kwd>learning</kwd>
<kwd>digital learning</kwd>
</kwd-group>
<contract-sponsor id="cn001">Ministry of Science and ICT, South Korea<named-content content-type="fundref-id">10.13039/501100014188</named-content>
</contract-sponsor>
<counts>
<fig-count count="2"/>
<table-count count="3"/>
<equation-count count="0"/>
<ref-count count="29"/>
<page-count count="8"/>
<word-count count="3618"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-in-acceptance</meta-name>
<meta-value>Thyroid Endocrinology</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1">
<title>Highlights</title>
<list list-type="bullet">
<list-item>
<p>
<bold>Key-finding:</bold> Online self-learning with 3,000 cases improved the diagnostic performance of 26 inexperienced readers (0.679 vs 0.713, p=0.027). Results from an artificial intelligence-based computer-assisted diagnosis program improved it even more (0.713 vs 0.758, p=0.001).</p>
</list-item>
<list-item>
<p>
<bold>Importance:</bold> Online self-learning can improve the diagnostic performance of inexperienced readers from variable backgrounds, and performance can be further enhanced with artificial intelligence-based computer-assisted diagnosis software.</p>
</list-item>
</list>
</sec>
<sec id="s2" sec-type="intro">
<title>Introduction</title>
<p>The primary tool for diagnosing thyroid cancer is ultrasonography (US) (<xref ref-type="bibr" rid="B1">1</xref>&#x2013;<xref ref-type="bibr" rid="B5">5</xref>). While US exhibits a high diagnostic accuracy, it is inherently operator-dependent and this necessitates appropriate training of related personnel to maintain the quality of examinations. Traditionally, US training is conducted through textbooks, lectures, or one-on-one education sessions between an educator and trainee. While the latter method has been effective, it also has notable disadvantages, such as putting a significant burden on educators and resources and an inability to guarantee a consistent quality of education (<xref ref-type="bibr" rid="B6">6</xref>).</p>
<p>Considerable experience is required to make accurate diagnoses with US, and the skill of examiners is known to correlate with the number of scans they have performed (<xref ref-type="bibr" rid="B7">7</xref>, <xref ref-type="bibr" rid="B8">8</xref>). Thus, trainees need sufficient practice before performing examinations on people; not only is foundational knowledge of scan techniques or anatomy required but also preparation for actual &#x201c;diagnosis&#x201d; or &#x201c;decision-making&#x201d; is required. The diagnostic performance of inexperienced readers is known to improve through one-on-one training or structured training in the radiology department (<xref ref-type="bibr" rid="B9">9</xref>&#x2013;<xref ref-type="bibr" rid="B11">11</xref>). Considering the pattern-based diagnosis of thyroid nodules in US, simple training with a large number of image examples combined with answers can be helpful when learning how to differentiate benign and malignant thyroid nodules. In a past study, deep learning software achieved similar diagnostic performance to expert radiologists based on 13,560 images (<xref ref-type="bibr" rid="B12">12</xref>), and in another, meaningful improvements in diagnostic performance were also observed in college students who had no previous experience in thyroid US, who went through learning sessions using a large training input of image-pathology sets (<xref ref-type="bibr" rid="B13">13</xref>).</p>
<p>With the development and commercialization of artificial intelligence-based computer-assisted diagnosis (AI-CAD) in thyroid imaging, potential improvements have been reported in diagnostic performance, particularly among readers with relatively limited experience (<xref ref-type="bibr" rid="B14">14</xref>&#x2013;<xref ref-type="bibr" rid="B16">16</xref>). Thyroid Imaging Reporting and Data System (TI-RADS) is commonly used in the evaluation of thyroid nodules, and one study showed that an AI algorithm trained on TI-RADS characteristics outperformed another trained solely on distinguishing benign from malignant nodules (<xref ref-type="bibr" rid="B17">17</xref>). Furthermore, another study reported that an AI-proposed new TI-RADS criteria demonstrated superior specificity compared to the established American College of Radiology (ACR) TI-RADS (<xref ref-type="bibr" rid="B18">18</xref>). This underscores the potential of AI to enhance diagnostic protocols by leveraging structured reporting systems like TI-RADS. These advancements in AI-CAD not only support diagnostic precision but also provide crucial feedback during the learning phase, directly assisting beginner radiologists. We hypothesize that AI assistance can further aid beginner radiologists in diagnosing thyroid nodules after they undergo a self-learning process, ensuring more consistent and reliable diagnostic outcomes.</p>
<p>In this study, we investigated the value of self-learning and AI-CAD assistance in inexperienced readers.</p>
</sec>
<sec id="s3" sec-type="materials|methods">
<title>Materials and methods</title>
<p>This study was approved by the Institutional Review Board of Severance Hospital and informed consent was obtained from all participants (No. 4&#x2013;2022-1562).</p>
<sec id="s3_1">
<title>Study design</title>
<p>Between February and August 2023, we recruited 26 inexperienced readers (less than 1 year of experience in thyroid US) from 6 hospitals. These participants were medical residents or fellows specializing in various departments including radiology, internal medicine, surgery, and family medicine. At first, readers were asked to watch a 5-minute online lecture (available via <ext-link ext-link-type="uri" xlink:href="https://youtu.be/pnF5vUaIovI">https://youtu.be/pnF5vUaIovI</ext-link>, Korean only) on K-TIRADS (Korean Thyroid Imaging Reporting and Data System) classification (<xref ref-type="bibr" rid="B19">19</xref>) and perform a pretest consisting of 120 US images to make binary decisions (benign vs malignant) and assess K-TIRADS categories. Next, readers learned with a training set of 3,000 US images using an online platform, designed to consecutively display single nodule images, each accompanied by a binary diagnosis of benign or malignant. The platform allowed readers to adjust the playback speed according to their preferences. After completing the learning session, readers immediately repeated the same test as the pretest. Lastly, they underwent the test again, this time with AI assistance, using the SERA (SEveRance Artificial intelligence) program described in the following section. They were asked to complete training and testing within two weeks, and while the pace of online learning was adjusted to each individual, the readers had to record the time taken to study all 3,000 cases and the time spent on testing (<xref ref-type="table" rid="T1">
<bold>Table&#xa0;1</bold>
</xref>).</p>
<table-wrap id="T1" position="float">
<label>Table&#xa0;1</label>
<caption>
<p>General information on the 26 inexperienced readers from 6 hospitals.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="left"/>
<th valign="top" align="left">Total (%)</th>
<th valign="top" align="left">Radiology department<break/>(n=18)</th>
<th valign="top" align="left">Other departments<break/>(n=8)</th>
<th valign="top" align="left">p-value</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Department<break/>&#x2003;Radiology<break/>&#x2003;Internal medicine<break/>&#x2003;Surgery<break/>&#x2003;Family medicine</td>
<td valign="top" align="left">
<break/>18 (69.2)<break/>2 (7.7)<break/>2 (7.7)<break/>4 (15.4)</td>
<td valign="top" align="left"/>
<td valign="top" align="left"/>
<td valign="top" align="left"/>
</tr>
<tr>
<td valign="top" align="left">Duration of previous thyroid US (month, SD)</td>
<td valign="top" align="left">2.3 &#xb1; 3.0</td>
<td valign="top" align="left">2.4 &#xb1; 2.6</td>
<td valign="top" align="left">2.4 &#xb1; 4.1</td>
<td valign="top" align="left">0.969</td>
</tr>
<tr>
<td valign="top" align="left">Time required for self-learning (min, SD)</td>
<td valign="top" align="left">222 &#xb1; 120</td>
<td valign="top" align="left">247 &#xb1; 140</td>
<td valign="top" align="left">165 &#xb1; 42</td>
<td valign="top" align="left">0.122</td>
</tr>
<tr>
<td valign="top" align="left">Time required for test with AI assistance (min, SD)</td>
<td valign="top" align="left">83 &#xb1; 59</td>
<td valign="top" align="left">83 &#xb1; 68</td>
<td valign="top" align="left">83 &#xb1; 41</td>
<td valign="top" align="left">0.990</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>Min, minute; SD, standard deviation; AI, artificial intelligence.</p>
</fn>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="s3_2">
<title>Learning and test sets</title>
<p>We selected 3,000 images from 13,560 image sets utilized in a previous study (<xref ref-type="bibr" rid="B13">13</xref>). Images that demonstrated the most significant mean accuracy enhancement compared to earlier data points were selected, and these images made up Set 3 in the preceding study (<xref ref-type="bibr" rid="B13">13</xref>). The mean age of patients from whom the US images were derived for the learning set was 48.2 &#xb1; 13.8 years, and 81% of the patients were women. The mean size of the nodules was 20.0 &#xb1; 11.0 mm, with 49% being benign and 51% malignant, of which 98.8% were identified as papillary thyroid carcinoma.</p>
<p>The test set, which was not included in the learning set, included 120 surgically confirmed thyroid nodules. The sample size for the test set was determined through estimations of the effect size, non-centrality parameters, denominator degrees of freedom, and power calculations. The mean age of patients from whom the US images were obtained for the test set was 43.7 &#xb1; 12.4 years, and 78.3% of the patients were women. The mean size of the nodules was 20.1 &#xb1; 9.4 mm. In terms of pathology, 48% of the nodules were benign and 52% were malignant, with a vast majority (93.5%) of the malignant nodules being classified as papillary thyroid carcinoma.</p>
<p>The standard reference of the test set for K-TIRADS assessment was consensus among the three experienced readers (5, 13, 23 years of experience in thyroid imaging). For reference, their intraclass correlation coefficient (ICC) was 0.908 (95% CI 0.876&#x2013;0.933).</p>
</sec>
<sec id="s3_3">
<title>AI-CAD application</title>
<p>SERA is an online deep learning-based computer-aided diagnosis program trained with 13,560 US images of thyroid nodules that were surgically confirmed or cytologically proven as benign (category II) or malignant (category VI) on the Bethesda system and larger than 1cm in size (<xref ref-type="bibr" rid="B12">12</xref>). When users upload an US image cropped around the focal thyroid lesion according to user preference, SERA provides continuous numbers between 0 and 100, which correspond to the probability of the given test image being malignant (<xref ref-type="fig" rid="f1">
<bold>Figure&#xa0;1</bold>
</xref>). Since SERA presents results that are dependent on how images are cropped and which images are uploaded, the SERA scores are impacted by the initial judgments of users. In prior research, SERA showed comparable diagnostic performance to expert radiologists in an external validation set for diagnosing thyroid nodules (<xref ref-type="bibr" rid="B12">12</xref>).</p>
<fig id="f1" position="float">
<label>Figure&#xa0;1</label>
<caption>
<p>Image of the working process of the SERA program. When an US image is uploaded and cropped by the user, SERA presents the binary result (benign or malignant) with a malignant probability score. SERA, SEveRance Artificial intelligence program.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fendo-15-1372397-g001.tif"/>
</fig>
</sec>
<sec id="s3_4">
<title>Statistical analysis</title>
<p>Sensitivity, specificity, accuracy and area under the receiver operating characteristic curve (AUC) were used to assess the diagnostic performance of each inexperienced reader. Interobserver agreement was quantified by the ICC. A two-sample t-test was used to detect differences between groups, specifically readers of radiology against readers of other specialties. The paired t-test was used to assess changes in diagnostic performance within the same group throughout the training program.</p>
<p>All statistical analyses were performed using SPSS (version 26.0) and MedCalc 22.009 (MedCalc Software, Oostende, Belgium). A p-value of 0.05 or less was considered statistically significant.</p>
</sec>
</sec>
<sec id="s4" sec-type="results">
<title>Results</title>
<p>Among 26 participants, 18 readers were radiology residents (1<sup>st</sup> and 2<sup>nd</sup> year), and the other 8 were 4 fellows in endocrinology and surgery and 4 residents in family medicine (3<sup>rd</sup> year). All 26 readers had little to no experience with thyroid US (range 0&#x2013;10 months). The learning process for the 3,000 sets took an average of 222 minutes, and the test for the 120 sets utilizing AI assistance was completed in an average of 85 minutes (<xref ref-type="table" rid="T1">
<bold>Table&#xa0;1</bold>
</xref>). There was no statistical difference in the duration of exposure between radiology residents and readers of other specialties (<xref ref-type="table" rid="T1">
<bold>Table&#xa0;1</bold>
</xref>).</p>
<sec id="s4_1">
<title>Changes in diagnostic performance after self-learning</title>
<p>After self-learning with 3,000 cases, 26 readers improved accuracy (68.0% vs 71.2%, p=0.037) and AUC (0.679 vs 0.713, p=0.027) compared to their pretest performance (<xref ref-type="table" rid="T2">
<bold>Table&#xa0;2</bold>
</xref>).</p>
<table-wrap id="T2" position="float">
<label>Table&#xa0;2</label>
<caption>
<p>Changes in the mean diagnostic performance of 26 readers during the learning program.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="left"/>
<th valign="top" align="left">Pretest</th>
<th valign="top" align="left">Posttest<sup>*</sup>
</th>
<th valign="top" align="left">P-value<sup>&#x2020;</sup>
</th>
<th valign="top" align="left">AI-assistance</th>
<th valign="top" align="left">P-value<sup>&#x2021;</sup>
</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Sensitivity (%)</td>
<td valign="top" align="left">70.2 &#xb1; 15.7</td>
<td valign="top" align="left">69.6 &#xb1; 13.4</td>
<td valign="top" align="left">0.857</td>
<td valign="top" align="left">77.2 &#xb1; 8.7</td>
<td valign="top" align="left">0.002</td>
</tr>
<tr>
<td valign="top" align="left">Specificity (%)</td>
<td valign="top" align="left">65.7 &#xb1; 24.7</td>
<td valign="top" align="left">73.1 &#xb1; 20.0</td>
<td valign="top" align="left">0.126</td>
<td valign="top" align="left">74.3 &#xb1; 18.0</td>
<td valign="top" align="left">0.584</td>
</tr>
<tr>
<td valign="top" align="left">Accuracy (%)</td>
<td valign="top" align="left">68.0 &#xb1; 6.6</td>
<td valign="top" align="left">71.2 &#xb1; 6.4</td>
<td valign="top" align="left">0.037</td>
<td valign="top" align="left">75.8 &#xb1; 5.1</td>
<td valign="top" align="left">0.001</td>
</tr>
<tr>
<td valign="top" align="left">AUC</td>
<td valign="top" align="left">0.679 &#xb1; 0.07</td>
<td valign="top" align="left">0.713 &#xb1; 0.07</td>
<td valign="top" align="left">0.027</td>
<td valign="top" align="left">0.758 &#xb1; 0.06</td>
<td valign="top" align="left">0.001</td>
</tr>
<tr>
<td valign="top" align="left">ICC</td>
<td valign="top" align="left">0.575 &#xb1; 0.12</td>
<td valign="top" align="left">0.601 &#xb1; 0.13</td>
<td valign="top" align="left">0.104</td>
<td valign="top" align="left">0.590 &#xb1; 0.12</td>
<td valign="top" align="left">0.608</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>AI, artificial intelligence.</p>
</fn>
<fn>
<p>AUC, area under the receiver operating characteristic curve; ICC, intraclass correlation coefficients.</p>
</fn>
<fn>
<p>
<sup>*</sup> after self-learning.</p>
</fn>
<fn>
<p>
<sup>&#x2020;</sup> Comparison between pretest and posttest.</p>
</fn>
<fn>
<p>
<sup>&#x2021;</sup> Comparison between posttest and test with AI assistance.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<p>We separated 18 readers of radiology residency from the remaining 8 readers, and the pretest results of the radiology residents showed higher specificity (73.8% vs 47.4%, p=0.04) (<xref ref-type="table" rid="T3">
<bold>Table&#xa0;3</bold>
</xref>). After self-learning, the radiology residents improved accuracy (69.9% to 74.2%, p=0.013) and AUC (0.7 to 0.743, p=0.016), but readers of other departments did not. Also, radiology residents showed better accuracy (74.2% vs 64.4%, p&lt;0.001) and AUC (0.743 vs 0.647, p&lt;0.001) than readers from other departments (<xref ref-type="table" rid="T3">
<bold>Table&#xa0;3</bold>
</xref>, <xref ref-type="fig" rid="f2">
<bold>Figure&#xa0;2</bold>
</xref>).</p>
<table-wrap id="T3" position="float">
<label>Table&#xa0;3</label>
<caption>
<p>Changes in the mean diagnostic performance of 26 readers during the learning program compared between radiology residents and readers of other specialties.</p>
</caption>
<table frame="hsides">
<thead>
<tr>
<th valign="top" align="left"/>
<th valign="top" align="left">Pretest</th>
<th valign="top" align="left">Posttest<sup>*</sup>
</th>
<th valign="top" align="left">P-value<sup>&#x2020;</sup>
</th>
<th valign="top" align="left">AI-assistance</th>
<th valign="top" align="left">P-value<sup>&#x2021;</sup>
</th>
</tr>
</thead>
<tbody>
<tr>
<th valign="top" colspan="6" align="left">Sensitivity (%)</th>
</tr>
<tr>
<td valign="top" align="left">Radiology</td>
<td valign="top" align="left">66.1 &#xb1; 14.4</td>
<td valign="top" align="left">70.3 &#xb1; 9.1</td>
<td valign="top" align="left">0.125</td>
<td valign="top" align="left">74.9 &#xb1; 7.8</td>
<td valign="top" align="left">0.023</td>
</tr>
<tr>
<td valign="top" align="left">Other</td>
<td valign="top" align="left">79.2 &#xb1; 18.3</td>
<td valign="top" align="left">67.9 &#xb1; 20.9</td>
<td valign="top" align="left">0.145</td>
<td valign="top" align="left">82.3 &#xb1; 9.0</td>
<td valign="top" align="left">0.024</td>
</tr>
<tr>
<td valign="top" align="left">P-value<sup>&#xa7;</sup>
</td>
<td valign="top" align="left">0.067</td>
<td valign="top" align="left">0.763</td>
<td valign="top" align="left"/>
<td valign="top" align="left">0.069</td>
<td valign="top" align="left"/>
</tr>
<tr>
<th valign="top" colspan="6" align="left">Specificity (%)</th>
</tr>
<tr>
<td valign="top" align="left">Radiology</td>
<td valign="top" align="left">73.9 &#xb1; 18.3</td>
<td valign="top" align="left">78.3 &#xb1; 14.4</td>
<td valign="top" align="left">0.323</td>
<td valign="top" align="left">79.5 &#xb1; 13.4</td>
<td valign="top" align="left">0.637</td>
</tr>
<tr>
<td valign="top" align="left">Other</td>
<td valign="top" align="left">47.4 &#xb1; 28.6</td>
<td valign="top" align="left">61.4 &#xb1; 26.5</td>
<td valign="top" align="left">0.273</td>
<td valign="top" align="left">62.7 &#xb1; 22.4</td>
<td valign="top" align="left">0.795</td>
</tr>
<tr>
<td valign="top" align="left">P-value<sup>&#xa7;</sup>
</td>
<td valign="top" align="left">0.04</td>
<td valign="top" align="left">0.129</td>
<td valign="top" align="left"/>
<td valign="top" align="left">0.08</td>
<td valign="top" align="left"/>
</tr>
<tr>
<th valign="top" colspan="6" align="left">Accuracy (%)</th>
</tr>
<tr>
<td valign="top" align="left">Radiology</td>
<td valign="top" align="left">69.9 &#xb1; 5.4</td>
<td valign="top" align="left">74.2 &#xb1; 4.9</td>
<td valign="top" align="left">0.013</td>
<td valign="top" align="left">77.1 &#xb1; 3.9</td>
<td valign="top" align="left">0.046</td>
</tr>
<tr>
<td valign="top" align="left">Other</td>
<td valign="top" align="left">63.9 &#xb1; 7.4</td>
<td valign="top" align="left">64.4 &#xb1; 3.7</td>
<td valign="top" align="left">0.862</td>
<td valign="top" align="left">72.8 &#xb1; 6.5</td>
<td valign="top" align="left">0.007</td>
</tr>
<tr>
<td valign="top" align="left">P-value<sup>&#xa7;</sup>
</td>
<td valign="top" align="left">0.066</td>
<td valign="top" align="left">&lt;0.001</td>
<td valign="top" align="left"/>
<td valign="top" align="left">0.115</td>
<td valign="top" align="left"/>
</tr>
<tr>
<th valign="top" colspan="6" align="left">AUC</th>
</tr>
<tr>
<td valign="top" align="left">Radiology</td>
<td valign="top" align="left">0.7 &#xb1; 0.06</td>
<td valign="top" align="left">0.743 &#xb1; 0.51</td>
<td valign="top" align="left">0.016</td>
<td valign="top" align="left">0.772 &#xb1; 0.04</td>
<td valign="top" align="left">0.053</td>
</tr>
<tr>
<td valign="top" align="left">Other</td>
<td valign="top" align="left">0.633 &#xb1; 0.08</td>
<td valign="top" align="left">0.647 &#xb1; 0.38</td>
<td valign="top" align="left">0.671</td>
<td valign="top" align="left">0.725 &#xb1; 0.07</td>
<td valign="top" align="left">0.006</td>
</tr>
<tr>
<td valign="top" align="left">P-value<sup>&#xa7;</sup>
</td>
<td valign="top" align="left">0.059</td>
<td valign="top" align="left">&lt;0.001</td>
<td valign="top" align="left"/>
<td valign="top" align="left">0.111</td>
<td valign="top" align="left"/>
</tr>
<tr>
<th valign="top" colspan="6" align="left">ICC</th>
</tr>
<tr>
<td valign="top" align="left">Radiology</td>
<td valign="top" align="left">0.615 &#xb1; 0.11</td>
<td valign="top" align="left">0.621 &#xb1; 0.14</td>
<td valign="top" align="left">0.771</td>
<td valign="top" align="left"/>
<td valign="top" align="left"/>
</tr>
<tr>
<td valign="top" align="left">Other</td>
<td valign="top" align="left">0.485 &#xb1; 0.07</td>
<td valign="top" align="left">0.557 &#xb1; 0.1</td>
<td valign="top" align="left">0.008</td>
<td valign="top" align="left"/>
<td valign="top" align="left"/>
</tr>
<tr>
<td valign="top" align="left">P-value<sup>&#xa7;</sup>
</td>
<td valign="top" align="left">0.002</td>
<td valign="top" align="left">0.203</td>
<td valign="top" align="left"/>
<td valign="top" align="left"/>
<td valign="top" align="left"/>
</tr>
</tbody>
</table>
<table-wrap-foot>
<fn>
<p>AI, artificial intelligence.</p>
</fn>
<fn>
<p>AUC, area under the receiver operating characteristic curve; ICC, intraclass correlation coefficients.</p>
</fn>
<fn>
<p>
<sup>*</sup> after self-learning.</p>
</fn>
<fn>
<p>
<sup>&#x2020;</sup> Comparison between pretest vs. posttest.</p>
</fn>
<fn>
<p>
<sup>&#x2021;</sup> Comparison between posttest vs. with AI assistance.</p>
</fn>
<fn>
<p>
<sup>&#xa7;</sup> Comparison between residents in radiology vs. readers of other specialties.</p>
</fn>
</table-wrap-foot>
</table-wrap>
<fig id="f2" position="float">
<label>Figure&#xa0;2</label>
<caption>
<p>Mean diagnostic performance of readers during the learning program. <bold>(A)</bold> sensitivity, <bold>(B)</bold> specificity, <bold>(C)</bold> accuracy, <bold>(D)</bold> AUC and <bold>(E)</bold> ICC with 95% confidence intervals. The pretest was performed before self-learning and the posttest was performed after self-learning. AI, artificial intelligence; AUC, area under the receiver operating characteristic curve; ICC, intraclass correlation coefficients.</p>
</caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fendo-15-1372397-g002.tif"/>
</fig>
</sec>
<sec id="s4_2">
<title>Changes in diagnostic performance with AI assistance</title>
<p>For all readers, diagnostic performance improved more with AI assistance compared to posttest; sensitivity (69.6% vs 77.2%, p=0.002), accuracy (71.2% vs 75.8%, p=0.001) and AUC (0.713 vs 0.758, p=0.001) all improved (<xref ref-type="table" rid="T2">
<bold>Table&#xa0;2</bold>
</xref>). In the radiology group, sensitivity increased from 70.3% to 74.9% (p=0.023), and accuracy from 74.2% to 77.1% (p=0.046). In the other departments group, sensitivity increased from 67.9% to 82.3% (p=0.024), accuracy from 64.4% to 72.8% (p=0.007), and AUC from 0.647 to 0.725 (p=0.006). Final sensitivity, specificity, accuracy and AUC were not statistically different between the two groups (<xref ref-type="table" rid="T3">
<bold>Table&#xa0;3</bold>
</xref>, <xref ref-type="fig" rid="f2">
<bold>Figure&#xa0;2</bold>
</xref>).</p>
</sec>
<sec id="s4_3">
<title>Changes in K-TIRADS assessment</title>
<p>When we calculated the ICC for K-TIRADS assessment in consensus with the three staff radiologists, the overall ICC for K-TIRADS assessment did not significantly change during self-learning (0.575 vs 0.601). In the subgroup analysis, the ICC of radiology residents was higher than that of the other department readers in the pretest (0.615 vs 0.485, p=0.002). However, the ICC of readers from other departments increased after self-learning. The ICC showed no statistical difference between the two groups after self-learning (0.621 vs 0.557, p=0.203) (<xref ref-type="table" rid="T3">
<bold>Table&#xa0;3</bold>
</xref>). The ICC value for each reader before and after self-learning is shown in <xref ref-type="supplementary-material" rid="SM1">
<bold>Supplementary Table&#xa0;1</bold>
</xref>.</p>
</sec>
</sec>
<sec id="s5" sec-type="discussion">
<title>Discussion</title>
<p>In this study, we investigated the effectiveness of online-based self-learning for diagnosing thyroid cancer in 26 inexperienced readers from six different hospitals from diverse specialties. Furthermore, we examined the impact of AI assistance on their diagnostic performance for thyroid nodules. After training with a set of 3,000 images, both AUC and accuracy improved for all readers on average, and AI assistance further enhanced these metrics.</p>
<p>Previously, a similar method of self-learning was proposed with 13,560 images being learned by six college freshmen (<xref ref-type="bibr" rid="B13">13</xref>). The six freshmen also showed improved sensitivity, specificity, accuracy, and AUC. However, it took an average of 30 hours for these freshmen to learn with 13,560 images (<xref ref-type="bibr" rid="B13">13</xref>), and viewing 13,560 images at a specific learning location for this amount of time poses considerable challenges in real life. In this study, we provided 3,000 images and all training was executed via an online platform, enabling participants to learn in their personal space at their convenience and record their results subsequently. In our study, we trained individuals with little to no experience in thyroid US but found that those more likely to benefit from training were radiology residents, family medicine residents, endocrinology fellows, and surgery fellows. On average, our participants took a mean of 222 minutes to learn from the 3,000 images, and this training led to increase in accuracy and AUC.</p>
<p>When we performed a subgroup analysis according to the medical department, the benefit of digital self-learning was only significant in radiology residents. Although there was no statistical difference in the recorded duration of exposure in the learning session between the radiology and other department groups, radiology residents are continuously exposed to images and cases through lectures and conferences during their training. This aspect of learning is likely to differentiate them from readers from other medical specialties. For groups less familiar or exposed to US images or radiological diagnostics, self-learning with 3,000 images may simply not be enough to achieve significant increase in diagnostic accuracy. Given the variation in outcomes across different specialties, incorporating detailed explanations for correct or incorrect answers during the self-learning phase could potentially enhance understanding and retention, particularly for those less familiar with ultrasound imaging. This method could mirror more interactive learning approaches found in question banks, which have been shown to improve diagnostic skills by reinforcing learning points through immediate feedback.</p>
<p>After the self-learning process, the final test performance with AI-CAD assistance showed additional increases in sensitivity, AUC, and accuracy. Previous research has well-documented the increased advantage that AI-CAD offers to beginners in US (<xref ref-type="bibr" rid="B12">12</xref>, <xref ref-type="bibr" rid="B20">20</xref>&#x2013;<xref ref-type="bibr" rid="B24">24</xref>). AI-CAD appears to supplement self-learning by offering direct assistance on specific cases, rather than just amplifying the learning effect. Unlike digital self-learning, AI-CAD assistance was effective for all readers, regardless of whether they were from the radiology department or others.</p>
<p>Additionally, as K-TIRADS is predominantly used for image interpretation in Korea, we also sought to ascertain whether the self-learning program had an impact on K-TIRADS assessment. Although the overall ICC for K-TIRADS assessment did not improve with self-learning, the ICC of readers from other specialties increased to the ICC of radiology residents. While such categorical assessments are known to have high interobserver variability (<xref ref-type="bibr" rid="B25">25</xref>), if we take into consideration that our standard reference group of experienced readers had an ICC of 0.908, we can assume that K-TIRADS assessments by inexperienced readers need further calibration. The challenges of these assessments appear hard to overcome with image-diagnosis set training.</p>
<p>Our study was conducted entirely on an online platform, enabling participants to learn at their own pace and schedule. This approach facilitated the recruitment of participants from hospitals located in diverse regions. One major advantage of online learning is its ability to reduce the burden on instructors, offer flexibility in terms of time and location, and provide consistent education to a broad audience (<xref ref-type="bibr" rid="B26">26</xref>). The proliferation of online learning, especially post-COVID, means that learners today have a strong propensity for web- and social media-based curricula (<xref ref-type="bibr" rid="B27">27</xref>, <xref ref-type="bibr" rid="B28">28</xref>). However, US education isn&#x2019;t just about gaining knowledge; it encompasses the development of psychomotor skills, visual perception for image acquisition, interpretation, and integration into medical decision-making (<xref ref-type="bibr" rid="B29">29</xref>). While our online self-learning can address some of these aspects, we anticipate it being particularly effective as a preparatory step to enhance diagnostic performance and boost confidence before trainees handle real clinical situations.</p>
<p>Similarly, AI-based diagnostic augmentation has shown comparable trends in improving diagnostic performance across other medical fields such as dermatology, cardiology, and oncology, where it enhances accuracy and aids less experienced practitioners. The success of these applications suggests that the learning methods employed in our study could potentially be adapted to these fields. In line with expanding our understanding of AI&#x2019;s utility in medical training, further research could involve testing readers of different experience levels, including senior radiology residents, fellows, and junior faculty. Such studies would help ascertain if even more senior readers can benefit from AI, potentially broadening the scope of AI tools in supporting ongoing professional development and decision-making processes across various stages of a medical career.</p>
<p>There are some limitations to our study. First, since our approach was entirely based on online learning and testing, we had limited control over the learning process. Although we restricted the learning period to two weeks, outcomes might differ between participants who studied intensively and those who learned sporadically. Second, we assessed the overall effects on 26 learners from various medical departments, but the standard deviation of performance due to their different specialty backgrounds was substantial, especially for readers from other specialties than radiology. This variability makes it challenging to achieve statistical significance. Third, we evaluated performance based on binary diagnoses, which may seem overly simplistic. Finally, although we provided a set of 3000 cases for the one-time self-learning session, repetitive training might change the results.</p>
<p>In conclusion, our study demonstrated that while AI-CAD assists all inexperienced readers in improving diagnostic performance for thyroid nodules, the effectiveness of self-learning appears more pronounced in radiology residents, likely due to their prior ultrasonography knowledge. Further studies could explore its impact on other non-radiologist groups.</p>
</sec>
<sec id="s6" sec-type="data-availability">
<title>Data availability statement</title>
<p>The raw data supporting the conclusions of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec id="s7" sec-type="ethics-statement">
<title>Ethics statement</title>
<p>The studies involving humans were approved by Ethical Review Board of Severance Hospital. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec id="s8" sec-type="author-contributions">
<title>Author contributions</title>
<p>SL: Data curation, Methodology, Writing &#x2013; original draft, Writing &#x2013; review &amp; editing. HK: Data curation, Methodology, Writing &#x2013; review &amp; editing. HJ: Data curation, Writing &#x2013; review &amp; editing. JJ: Data curation, Writing &#x2013; review &amp; editing. JJ: Data curation, Writing &#x2013; review &amp; editing. JL: Data curation, Writing &#x2013; review &amp; editing. HH: Methodology, Visualization, Writing &#x2013; review &amp; editing. EL: Formal analysis, Software, Writing &#x2013; review &amp; editing. DK: Formal analysis, Software, Writing &#x2013; review &amp; editing. JK: Conceptualization, Funding acquisition, Project administration, Supervision, Writing &#x2013; original draft, Writing &#x2013; review &amp; editing.</p>
</sec>
</body>
<back>
<sec id="s9" sec-type="funding-information">
<title>Funding</title>
<p>The author(s) declare financial support was received for the research, authorship, and/or publication of this article. This study was supported by the National Research Foundation of Korea (NRF) grant funded by the Korean government (MSIT) (2021R1A2C2007492). The funders had no role in study design, data collection and analysis, decision to publish, or manuscript preparation.</p>
</sec>
<sec id="s10" sec-type="COI-statement">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="s11" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s12" sec-type="supplementary-material">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fendo.2024.1372397/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fendo.2024.1372397/full#supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="Table_1.docx" id="SM1" mimetype="application/vnd.openxmlformats-officedocument.wordprocessingml.document"/>
</sec>
<fn-group>
<title>Abbreviations</title>
<fn fn-type="abbr">
<p>AI-CAD, artificial intelligence-based computer-assisted diagnosis; K-TIRADS, Korean Thyroid Imaging Reporting and Data System; AUC, area under the receiver operating characteristic curve; ICC, intraclass correlation coefficients.</p>
</fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Tessler</surname> <given-names>FN</given-names>
</name>
<name>
<surname>Middleton</surname> <given-names>WD</given-names>
</name>
<name>
<surname>Grant</surname> <given-names>EG</given-names>
</name>
<name>
<surname>Hoang</surname> <given-names>JK</given-names>
</name>
<name>
<surname>Berland</surname> <given-names>LL</given-names>
</name>
<name>
<surname>Teefey</surname> <given-names>SA</given-names>
</name>
<etal/>
</person-group>. <article-title>ACR thyroid imaging, reporting and data system (TI-RADS): white paper of the ACR TI-RADS committee</article-title>. <source>J Am Coll Radiol</source>. (<year>2017</year>) <volume>14</volume>:<page-range>587&#x2013;95</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.jacr.2017.01.046</pub-id>
</citation>
</ref>
<ref id="B2">
<label>2</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Peng</surname> <given-names>S</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Lv</surname> <given-names>W</given-names>
</name>
<name>
<surname>Liu</surname> <given-names>L</given-names>
</name>
<name>
<surname>Zhou</surname> <given-names>Q</given-names>
</name>
<name>
<surname>Yang</surname> <given-names>H</given-names>
</name>
<etal/>
</person-group>. <article-title>Deep learning-based artificial intelligence model to assist thyroid nodule diagnosis and management: a multicentre diagnostic study</article-title>. <source>Lancet Digit Health</source>. (<year>2021</year>) <volume>3</volume>:<page-range>e250&#x2013;e9</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/S2589-7500(21)00041-8</pub-id>
</citation>
</ref>
<ref id="B3">
<label>3</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kwak</surname> <given-names>JY</given-names>
</name>
<name>
<surname>Han</surname> <given-names>KH</given-names>
</name>
<name>
<surname>Yoon</surname> <given-names>JH</given-names>
</name>
<name>
<surname>Moon</surname> <given-names>HJ</given-names>
</name>
<name>
<surname>Son</surname> <given-names>EJ</given-names>
</name>
<name>
<surname>Park</surname> <given-names>SH</given-names>
</name>
<etal/>
</person-group>. <article-title>Thyroid imaging reporting and data system for US features of nodules: a step in establishing better stratification of cancer risk</article-title>. <source>Radiology</source>. (<year>2011</year>) <volume>260</volume>:<page-range>892&#x2013;9</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1148/radiol.11110206</pub-id>
</citation>
</ref>
<ref id="B4">
<label>4</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kim</surname> <given-names>EK</given-names>
</name>
<name>
<surname>Park</surname> <given-names>CS</given-names>
</name>
<name>
<surname>Chung</surname> <given-names>WY</given-names>
</name>
<name>
<surname>Oh</surname> <given-names>KK</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>DI</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>JT</given-names>
</name>
<etal/>
</person-group>. <article-title>New sonographic criteria for recommending fine-needle aspiration biopsy of nonpalpable solid nodules of the thyroid</article-title>. <source>AJR Am J Roentgenol</source>. (<year>2002</year>) <volume>178</volume>:<page-range>687&#x2013;91</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.2214/ajr.178.3.1780687</pub-id>
</citation>
</ref>
<ref id="B5">
<label>5</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Joo</surname> <given-names>L</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>MK</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>JY</given-names>
</name>
<name>
<surname>Ha</surname> <given-names>EJ</given-names>
</name>
<name>
<surname>Na</surname> <given-names>DG</given-names>
</name>
</person-group>. <article-title>Diagnostic performance of ultrasound-based risk stratification systems for thyroid nodules: A systematic review and meta-analysis</article-title>. <source>Endocrinol Metab (Seoul)</source>. (<year>2023</year>) <volume>38</volume>:<page-range>117&#x2013;28</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.3803/EnM.2023.1670</pub-id>
</citation>
</ref>
<ref id="B6">
<label>6</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dietrich</surname> <given-names>CF</given-names>
</name>
<name>
<surname>Hoffmann</surname> <given-names>B</given-names>
</name>
<name>
<surname>Abramowicz</surname> <given-names>J</given-names>
</name>
<name>
<surname>Badea</surname> <given-names>R</given-names>
</name>
<name>
<surname>Braden</surname> <given-names>B</given-names>
</name>
<name>
<surname>Cantisani</surname> <given-names>V</given-names>
</name>
<etal/>
</person-group>. <article-title>Medical student ultrasound education: A WFUMB position paper</article-title>. <source>Ultrasound Med Biol</source>. (<year>2019</year>) <volume>45</volume>:<page-range>271&#x2013;81</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ultrasmedbio.2018.09.017</pub-id>
</citation>
</ref>
<ref id="B7">
<label>7</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hertzberg</surname> <given-names>BS</given-names>
</name>
<name>
<surname>Kliewer</surname> <given-names>MA</given-names>
</name>
<name>
<surname>Bowie</surname> <given-names>JD</given-names>
</name>
<name>
<surname>Carroll</surname> <given-names>BA</given-names>
</name>
<name>
<surname>DeLong</surname> <given-names>DH</given-names>
</name>
<name>
<surname>Gray</surname> <given-names>L</given-names>
</name>
<etal/>
</person-group>. <article-title>Physician training requirements in sonography: how many cases are needed for competence</article-title>? <source>AJR Am J Roentgenol</source>. (<year>2000</year>) <volume>174</volume>:<page-range>1221&#x2013;7</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.2214/ajr.174.5.1741221</pub-id>
</citation>
</ref>
<ref id="B8">
<label>8</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gracias</surname> <given-names>VH</given-names>
</name>
<name>
<surname>Frankel</surname> <given-names>HL</given-names>
</name>
<name>
<surname>Gupta</surname> <given-names>R</given-names>
</name>
<name>
<surname>Malcynski</surname> <given-names>J</given-names>
</name>
<name>
<surname>Gandhi</surname> <given-names>R</given-names>
</name>
<name>
<surname>Collazzo</surname> <given-names>L</given-names>
</name>
<etal/>
</person-group>. <article-title>Defining the learning curve for the Focused Abdominal Sonogram for Trauma (FAST) examination: implications for credentialing</article-title>. <source>Am Surg</source>. (<year>2001</year>) <volume>67</volume>:<page-range>364&#x2013;8</page-range>. doi: <pub-id pub-id-type="doi">10.1177/000313480106700414</pub-id>
</citation>
</ref>
<ref id="B9">
<label>9</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Liang</surname> <given-names>K</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>L</given-names>
</name>
<name>
<surname>Lai</surname> <given-names>C</given-names>
</name>
<name>
<surname>Li</surname> <given-names>R</given-names>
</name>
<name>
<surname>Yi</surname> <given-names>L</given-names>
</name>
<etal/>
</person-group>. <article-title>Small lesion classification on abbreviated breast MRI: training can improve diagnostic performance and inter-reader agreement</article-title>. <source>Eur Radiol</source>. (<year>2022</year>) <volume>32</volume>:<page-range>5742&#x2013;51</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00330-022-08622-9</pub-id>
</citation>
</ref>
<ref id="B10">
<label>10</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kim</surname> <given-names>HG</given-names>
</name>
<name>
<surname>Kwak</surname> <given-names>JY</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>EK</given-names>
</name>
<name>
<surname>Choi</surname> <given-names>SH</given-names>
</name>
<name>
<surname>Moon</surname> <given-names>HJ</given-names>
</name>
</person-group>. <article-title>Man to man training: can it help improve the diagnostic performances and interobserver variabilities of thyroid ultrasonography in residents</article-title>? <source>Eur J Radiol</source>. (<year>2012</year>) <volume>81</volume>:<page-range>e352&#x2013;6</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ejrad.2011.11.011</pub-id>
</citation>
</ref>
<ref id="B11">
<label>11</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Leeuwenburgh</surname> <given-names>MM</given-names>
</name>
<name>
<surname>Wiarda</surname> <given-names>BM</given-names>
</name>
<name>
<surname>Bipat</surname> <given-names>S</given-names>
</name>
<name>
<surname>Nio</surname> <given-names>CY</given-names>
</name>
<name>
<surname>Bollen</surname> <given-names>TL</given-names>
</name>
<name>
<surname>Kardux</surname> <given-names>JJ</given-names>
</name>
<etal/>
</person-group>. <article-title>Acute appendicitis on abdominal MR images: training readers to improve diagnostic accuracy</article-title>. <source>Radiology</source>. (<year>2012</year>) <volume>264</volume>:<page-range>455&#x2013;63</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1148/radiol.12111896</pub-id>
</citation>
</ref>
<ref id="B12">
<label>12</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Koh</surname> <given-names>J</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>E</given-names>
</name>
<name>
<surname>Han</surname> <given-names>K</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>EK</given-names>
</name>
<name>
<surname>Son</surname> <given-names>EJ</given-names>
</name>
<name>
<surname>Sohn</surname> <given-names>YM</given-names>
</name>
<etal/>
</person-group>. <article-title>Diagnosis of thyroid nodules on ultrasonography by a deep convolutional neural network</article-title>. <source>Sci Rep</source>. (<year>2020</year>) <volume>10</volume>:<fpage>15245</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-020-72270-6</pub-id>
</citation>
</ref>
<ref id="B13">
<label>13</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yoon</surname> <given-names>J</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>E</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>HS</given-names>
</name>
<name>
<surname>Cho</surname> <given-names>S</given-names>
</name>
<name>
<surname>Son</surname> <given-names>J</given-names>
</name>
<name>
<surname>Kwon</surname> <given-names>H</given-names>
</name>
<etal/>
</person-group>. <article-title>Learnability of thyroid nodule assessment on ultrasonography: using a big data set</article-title>. <source>Ultrasound Med Biol</source>. (<year>2023</year>) <volume>49</volume>:<page-range>2581&#x2013;89</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/j.ultrasmedbio.2023.08.026</pub-id>
</citation>
</ref>
<ref id="B14">
<label>14</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ha</surname> <given-names>EJ</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>JH</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>DH</given-names>
</name>
<name>
<surname>Moon</surname> <given-names>J</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>H</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>YN</given-names>
</name>
<etal/>
</person-group>. <article-title>Artificial intelligence model assisting thyroid nodule diagnosis and management: A multicenter diagnostic study</article-title>. <source>J Clin Endocrinol Metab</source>. (<year>2023</year>) <volume>109</volume>:<page-range>527&#x2013;35</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1210/clinem/dgad503</pub-id>
</citation>
</ref>
<ref id="B15">
<label>15</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>He</surname> <given-names>LT</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>FJ</given-names>
</name>
<name>
<surname>Zhou</surname> <given-names>DZ</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>YX</given-names>
</name>
<name>
<surname>Li</surname> <given-names>YS</given-names>
</name>
<name>
<surname>Tang</surname> <given-names>MX</given-names>
</name>
<etal/>
</person-group>. <article-title>A comparison of the performances of artificial intelligence system and radiologists in the ultrasound diagnosis of thyroid nodules</article-title>. <source>Curr Med Imaging</source>. (<year>2022</year>) <volume>18</volume>:<page-range>1369&#x2013;77</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.2174/1573405618666220422132251</pub-id>
</citation>
</ref>
<ref id="B16">
<label>16</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Wu</surname> <given-names>Q</given-names>
</name>
<name>
<surname>Chen</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Wang</surname> <given-names>Y</given-names>
</name>
</person-group>. <article-title>A clinical assessment of an ultrasound computer-aided diagnosis system in differentiating thyroid nodules with radiologists of different diagnostic experience</article-title>. <source>Front Oncol</source>. (<year>2020</year>) <volume>10</volume>:<elocation-id>557169</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3389/fonc.2020.557169</pub-id>
</citation>
</ref>
<ref id="B17">
<label>17</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wildman-Tobriner</surname> <given-names>B</given-names>
</name>
<name>
<surname>Buda</surname> <given-names>M</given-names>
</name>
<name>
<surname>Hoang</surname> <given-names>JK</given-names>
</name>
<name>
<surname>Middleton</surname> <given-names>WD</given-names>
</name>
<name>
<surname>Thayer</surname> <given-names>D</given-names>
</name>
<name>
<surname>Short</surname> <given-names>RG</given-names>
</name>
<etal/>
</person-group>. <article-title>Using artificial intelligence to revise ACR TI-RADS risk stratification of thyroid nodules: diagnostic accuracy and utility</article-title>. <source>Radiology</source>. (<year>2019</year>) <volume>292</volume>:<page-range>112&#x2013;9</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1148/radiol.2019182128</pub-id>
</citation>
</ref>
<ref id="B18">
<label>18</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Gao</surname> <given-names>Z</given-names>
</name>
<name>
<surname>He</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Mai</surname> <given-names>W</given-names>
</name>
<name>
<surname>Li</surname> <given-names>J</given-names>
</name>
<name>
<surname>Zhou</surname> <given-names>M</given-names>
</name>
<etal/>
</person-group>. <article-title>An artificial intelligence model based on ACR TI-RADS characteristics for US diagnosis of thyroid nodules</article-title>. <source>Radiology</source>. (<year>2022</year>) <volume>303</volume>:<page-range>613&#x2013;9</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1148/radiol.211455</pub-id>
</citation>
</ref>
<ref id="B19">
<label>19</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ha</surname> <given-names>EJ</given-names>
</name>
<name>
<surname>Chung</surname> <given-names>SR</given-names>
</name>
<name>
<surname>Na</surname> <given-names>DG</given-names>
</name>
<name>
<surname>Ahn</surname> <given-names>HS</given-names>
</name>
<name>
<surname>Chung</surname> <given-names>J</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>JY</given-names>
</name>
<etal/>
</person-group>. <article-title>Korean thyroid imaging reporting and data system and imaging-based management of thyroid nodules: Korean Society of Thyroid Radiology consensus statement and recommendations</article-title>. <source>Korean J Radiol</source>. (<year>2021</year>) <volume>22</volume>:<page-range>2094&#x2013;123</page-range>. doi: <pub-id pub-id-type="doi">10.3348/kjr.2021.0713</pub-id>
</citation>
</ref>
<ref id="B20">
<label>20</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname> <given-names>T</given-names>
</name>
<name>
<surname>Jiang</surname> <given-names>Z</given-names>
</name>
<name>
<surname>Lu</surname> <given-names>M</given-names>
</name>
<name>
<surname>Zou</surname> <given-names>S</given-names>
</name>
<name>
<surname>Wu</surname> <given-names>M</given-names>
</name>
<name>
<surname>Wei</surname> <given-names>T</given-names>
</name>
<etal/>
</person-group>. <article-title>Computer-aided diagnosis system of thyroid nodules ultrasonography: Diagnostic performance difference between computer-aided diagnosis and 111 radiologists</article-title>. <source>Med (United States)</source>. (<year>2020</year>) <volume>99</volume>:<elocation-id>e20634</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.1097/MD.0000000000020634</pub-id>
</citation>
</ref>
<ref id="B21">
<label>21</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jeong</surname> <given-names>EY</given-names>
</name>
<name>
<surname>Kim</surname> <given-names>HL</given-names>
</name>
<name>
<surname>Ha</surname> <given-names>EJ</given-names>
</name>
<name>
<surname>Park</surname> <given-names>SY</given-names>
</name>
<name>
<surname>Cho</surname> <given-names>YJ</given-names>
</name>
<name>
<surname>Han</surname> <given-names>M</given-names>
</name>
</person-group>. <article-title>Computer-aided diagnosis system for thyroid nodules on ultrasonography: diagnostic performance and reproducibility based on the experience level of operators</article-title>. <source>Eur Radiol</source>. (<year>2019</year>) <volume>29</volume>:<page-range>1978&#x2013;85</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.1007/s00330-018-5772-9</pub-id>
</citation>
</ref>
<ref id="B22">
<label>22</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chung</surname> <given-names>SR</given-names>
</name>
<name>
<surname>Baek</surname> <given-names>JH</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>MK</given-names>
</name>
<name>
<surname>Ahn</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Choi</surname> <given-names>YJ</given-names>
</name>
<name>
<surname>Sung</surname> <given-names>TY</given-names>
</name>
<etal/>
</person-group>. <article-title>Computer-aided diagnosis system for the evaluation of thyroid nodules on ultrasonography: Prospective non-inferiority study according to the experience level of radiologists</article-title>. <source>Korean J Radiol</source>. (<year>2020</year>) <volume>21</volume>:<page-range>369&#x2013;76</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.3348/kjr.2019.0581</pub-id>
</citation>
</ref>
<ref id="B23">
<label>23</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname> <given-names>X</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>S</given-names>
</name>
<name>
<surname>Zhang</surname> <given-names>Q</given-names>
</name>
<name>
<surname>Wei</surname> <given-names>X</given-names>
</name>
<name>
<surname>Pan</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Zhao</surname> <given-names>J</given-names>
</name>
<etal/>
</person-group>. <article-title>Diagnosis of thyroid cancer using deep convolutional neural network models applied to sonographic images: a retrospective, multicohort, diagnostic study</article-title>. <source>Lancet Oncol</source>. (<year>2019</year>) <volume>20</volume>:<fpage>193</fpage>&#x2013;<lpage>201</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1016/S1470-2045(18)30762-9</pub-id>
</citation>
</ref>
<ref id="B24">
<label>24</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kang</surname> <given-names>S</given-names>
</name>
<name>
<surname>Lee</surname> <given-names>E</given-names>
</name>
<name>
<surname>Chung</surname> <given-names>CW</given-names>
</name>
<name>
<surname>Jang</surname> <given-names>HN</given-names>
</name>
<name>
<surname>Moon</surname> <given-names>JH</given-names>
</name>
<name>
<surname>Shin</surname> <given-names>Y</given-names>
</name>
<etal/>
</person-group>. <article-title>A beneficial role of computer-aided diagnosis system for less experienced physicians in the diagnosis of thyroid nodule on ultrasound</article-title>. <source>Sci Rep</source>. (<year>2021</year>) <volume>11</volume>:<fpage>20448</fpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1038/s41598-021-99983-6</pub-id>
</citation>
</ref>
<ref id="B25">
<label>25</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Grani</surname> <given-names>G</given-names>
</name>
<name>
<surname>Lamartina</surname> <given-names>L</given-names>
</name>
<name>
<surname>Cantisani</surname> <given-names>V</given-names>
</name>
<name>
<surname>Maranghi</surname> <given-names>M</given-names>
</name>
<name>
<surname>Lucia</surname> <given-names>P</given-names>
</name>
<name>
<surname>Durante</surname> <given-names>C</given-names>
</name>
</person-group>. <article-title>Interobserver agreement of various thyroid imaging reporting and data systems</article-title>. <source>Endocrine Connections</source>. (<year>2018</year>) <volume>7</volume>:<fpage>1</fpage>&#x2013;<lpage>7</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.1530/EC-17-0336</pub-id>
</citation>
</ref>
<ref id="B26">
<label>26</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Safapour</surname> <given-names>E</given-names>
</name>
<name>
<surname>Kermanshachi</surname> <given-names>S</given-names>
</name>
<name>
<surname>Taneja</surname> <given-names>P</given-names>
</name>
</person-group>. <article-title>A review of nontraditional teaching methods: flipped classroom, gamification, case study, self-learning, and social media</article-title>. <source>Educ Sci.</source> (<year>2019</year>) <volume>9</volume>(<issue>4</issue>):<fpage>273</fpage>. doi: <pub-id pub-id-type="doi">10.3390/educsci9040273</pub-id>
</citation>
</ref>
<ref id="B27">
<label>27</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bahner</surname> <given-names>DP</given-names>
</name>
<name>
<surname>Adkins</surname> <given-names>E</given-names>
</name>
<name>
<surname>Patel</surname> <given-names>N</given-names>
</name>
<name>
<surname>Donley</surname> <given-names>C</given-names>
</name>
<name>
<surname>Nagel</surname> <given-names>R</given-names>
</name>
<name>
<surname>Kman</surname> <given-names>NE</given-names>
</name>
</person-group>. <article-title>How we use social media to supplement a novel curriculum in medical education</article-title>. <source>Med Teach</source>. (<year>2012</year>) <volume>34</volume>:<page-range>439&#x2013;44</page-range>. doi:&#xa0;<pub-id pub-id-type="doi">10.3109/0142159x.2012.668245</pub-id>
</citation>
</ref>
<ref id="B28">
<label>28</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Naciri</surname> <given-names>A</given-names>
</name>
<name>
<surname>Radid</surname> <given-names>M</given-names>
</name>
<name>
<surname>Kharbach</surname> <given-names>A</given-names>
</name>
<name>
<surname>Chemsi</surname> <given-names>G</given-names>
</name>
</person-group>. <article-title>E-learning in health professions education during the COVID-19 pandemic: a systematic review</article-title>. <source>J Educ Eval Health Prof</source>. (<year>2021</year>) <volume>18</volume>:<elocation-id>27</elocation-id>. doi:&#xa0;<pub-id pub-id-type="doi">10.3352/jeehp.2021.18.27</pub-id>
</citation>
</ref>
<ref id="B29">
<label>29</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lewiss</surname> <given-names>RE</given-names>
</name>
<name>
<surname>Hoffmann</surname> <given-names>B</given-names>
</name>
<name>
<surname>Beaulieu</surname> <given-names>Y</given-names>
</name>
<name>
<surname>Phelan</surname> <given-names>MB</given-names>
</name>
</person-group>. <article-title>Point-of-care ultrasound education: the increasing role of simulation and multimedia resources</article-title>. <source>J Ultrasound Med</source>. (<year>2014</year>) <volume>33</volume>:<fpage>27</fpage>&#x2013;<lpage>32</lpage>. doi:&#xa0;<pub-id pub-id-type="doi">10.7863/ultra.33.1.27</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>